Skip to content

model ¤

Implementations of all released minor versions are available in submodules: - model v0_4: bioimageio.spec.model.v0_4.ModelDescr - model v0_5: bioimageio.spec.model.v0_5.ModelDescr

Modules:

Attributes:

AnyModelDescr module-attribute ¤

AnyModelDescr = Annotated[
    Union[
        Annotated[
            ModelDescr_v0_4, Field(title="model 0.4")
        ],
        Annotated[
            ModelDescr_v0_5, Field(title="model 0.5")
        ],
    ],
    Discriminator("format_version"),
    Field(title="model"),
]

ModelDescr module-attribute ¤

ModelDescr = v0_5.ModelDescr

ModelDescr_v0_4 module-attribute ¤

ModelDescr_v0_4 = v0_4.ModelDescr

ModelDescr_v0_5 module-attribute ¤

ModelDescr_v0_5 = v0_5.ModelDescr

_v0_3_converter ¤

Functions:

convert_model_from_v0_3_to_0_4_0 ¤

convert_model_from_v0_3_to_0_4_0(
    data: Dict[str, Any],
) -> None

auto-converts model 'data' from format 0.3.x to format 0.4.0 in place

Source code in src/bioimageio/spec/model/_v0_3_converter.py
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
def convert_model_from_v0_3_to_0_4_0(data: Dict[str, Any]) -> None:
    """Convert a model 0.3.x RDF dict to format version 0.4.0 in place.

    `data` is first bumped through the 0.3 patch versions; dicts without a
    `format_version`, or whose version does not end up at 0.3.6, are left
    (mostly) untouched.

    Args:
        data: RDF content; mutated in place.
    """

    if "format_version" not in data:
        return

    if data["format_version"] == "0.3.0":
        # no breaking change, bump to 0.3.1
        data["format_version"] = "0.3.1"

    # NOTE: the patch converters return the dict to continue with
    if data["format_version"] == "0.3.1":
        data = _convert_model_v0_3_1_to_v0_3_2(data)

    if data["format_version"] == "0.3.2":
        data = _convert_model_v0_3_2_to_v0_3_3(data)

    if data["format_version"] in ("0.3.3", "0.3.4", "0.3.5"):
        data["format_version"] = "0.3.6"

    if data["format_version"] != "0.3.6":
        return

    # remove 'future' from config if no other than the used future entries exist
    # (isinstance guard for malformed RDFs, consistent with
    # `convert_from_older_format` in _v0_4_converter)
    config = data.get("config", {})
    if isinstance(config, dict) and config.get("future") == {}:
        del config["future"]

    # remove 'config' if now empty
    if data.get("config") == {}:
        del data["config"]

    # 'language' and 'framework' were dropped in format 0.4
    data.pop("language", None)
    data.pop("framework", None)

    # top-level source/sha256/kwargs moved into the pytorch_state_dict weights entry
    architecture = data.pop("source", None)
    architecture_sha256 = data.pop("sha256", None)
    kwargs = data.pop("kwargs", None)
    pytorch_state_dict_weights_entry = data.get("weights", {}).get("pytorch_state_dict")
    if pytorch_state_dict_weights_entry is not None:
        if architecture is not None:
            pytorch_state_dict_weights_entry["architecture"] = architecture

        if architecture_sha256 is not None:
            pytorch_state_dict_weights_entry["architecture_sha256"] = (
                architecture_sha256
            )

        if kwargs is not None:
            pytorch_state_dict_weights_entry["kwargs"] = kwargs

    # the 'pytorch_script' weights format was renamed to 'torchscript'
    torchscript_weights_entry = data.get("weights", {}).pop("pytorch_script", None)
    if torchscript_weights_entry is not None:
        data.setdefault("weights", {})["torchscript"] = torchscript_weights_entry

    data["format_version"] = "0.4.0"

_v0_4_converter ¤

Functions:

convert_from_older_format ¤

convert_from_older_format(
    data: BioimageioYamlContent,
) -> None
Source code in src/bioimageio/spec/model/_v0_4_converter.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
def convert_from_older_format(data: BioimageioYamlContent) -> None:
    """Bring model RDF `data` up to format version 0.4.10, mutating in place.

    Dicts without a string `format_version`, or with a format newer than
    0.4, are left untouched.
    """
    raw_version = data.get("format_version")
    if not isinstance(raw_version, str):
        return

    # only the major.minor part matters for dispatching
    major_minor = tuple(int(part) for part in raw_version.split(".")[:2])
    if major_minor > (0, 4):
        return  # newer than this converter handles

    if major_minor < (0, 4):
        convert_model_from_v0_3_to_0_4_0(data)

    # walk through the 0.4 patch versions step by step; each step updates
    # data["format_version"], so it is re-read before every step
    if data["format_version"] == "0.4.0":
        _convert_model_from_v0_4_0_to_0_4_1(data)

    if data["format_version"] in ("0.4.1", "0.4.2", "0.4.3", "0.4.4"):
        _convert_model_from_v0_4_4_to_0_4_5(data)

    if data["format_version"] in ("0.4.5", "0.4.6"):
        remove_slashes_from_names(data)
        data["format_version"] = "0.4.7"

    if data["format_version"] in ("0.4.7", "0.4.8"):
        data["format_version"] = "0.4.9"

    if data["format_version"] == "0.4.9":
        # promote config.bioimageio.nickname(_icon) to top-level id / id_emoji
        config = data.get("config")
        if isinstance(config, dict):
            bioimageio_config = config.get("bioimageio")
            if isinstance(bioimageio_config, dict):
                nickname = bioimageio_config.get("nickname")
                if nickname is not None:
                    data["id"] = nickname

                nickname_icon = bioimageio_config.get("nickname_icon")
                if nickname_icon is not None:
                    data["id_emoji"] = nickname_icon

        data["format_version"] = "0.4.10"

    remove_doi_prefix(data)
    remove_gh_prefix(data)
    # remove 'future' from config if no other than the used future entries exist
    config = data.get("config", {})
    if isinstance(config, dict) and config.get("future") == {}:
        del config["future"]

    # remove 'config' if now empty
    if data.get("config") == {}:
        del data["config"]

v0_4 ¤

Classes:

Functions:

Attributes:

AxesInCZYX module-attribute ¤

AxesInCZYX = Annotated[
    str,
    RestrictCharacters("czyx"),
    AfterValidator(validate_unique_entries),
]

AxesStr module-attribute ¤

AxesStr = Annotated[
    str,
    RestrictCharacters("bitczyx"),
    AfterValidator(validate_unique_entries),
]

CustomCallable module-attribute ¤

CustomCallable = Annotated[
    Union[CallableFromFile, CallableFromDepencency],
    Field(union_mode="left_to_right"),
]

KnownRunMode module-attribute ¤

KnownRunMode = Literal['deepimagej']

PostprocessingDescr module-attribute ¤

PostprocessingName module-attribute ¤

PostprocessingName = Literal[
    "binarize",
    "clip",
    "scale_linear",
    "sigmoid",
    "zero_mean_unit_variance",
    "scale_range",
    "scale_mean_variance",
]

PreprocessingDescr module-attribute ¤

PreprocessingName module-attribute ¤

PreprocessingName = Literal[
    "binarize",
    "clip",
    "scale_linear",
    "sigmoid",
    "zero_mean_unit_variance",
    "scale_range",
]

BinarizeDescr pydantic-model ¤

Bases: ProcessingDescrBase

BinarizeDescr the tensor with a fixed BinarizeKwargs.threshold. Values above the threshold will be set to one, values below the threshold to zero.

Show JSON schema:
{
  "$defs": {
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_4.BinarizeKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "BinarizeDescr the tensor with a fixed `BinarizeKwargs.threshold`.\nValues above the threshold will be set to one, values below the threshold to zero.",
  "properties": {
    "name": {
      "const": "binarize",
      "title": "Name",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/BinarizeKwargs"
    }
  },
  "required": [
    "name",
    "kwargs"
  ],
  "title": "model.v0_4.BinarizeDescr",
  "type": "object"
}

Fields:

implemented_name class-attribute ¤

implemented_name: Literal['binarize'] = 'binarize'

kwargs pydantic-field ¤

name pydantic-field ¤

name: Literal['binarize'] = 'binarize'

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    """Record `implemented_*` class attributes as fields to set explicitly.

    For every class attribute named `implemented_<field>` where `<field>` is
    a model field without a regular default, the attribute's value is stored
    in the read-only mapping `cls._fields_to_set_explicitly`.
    """
    explicit_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            # removeprefix instead of str.replace: replace would also drop
            # *interior* occurrences of "implemented_" in the attribute name
            field_name = attr.removeprefix("implemented_")
            if field_name not in cls.model_fields:
                continue

            # such fields must not also define a regular pydantic default
            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            explicit_fields[field_name] = getattr(cls, attr)

    cls._fields_to_set_explicitly = MappingProxyType(explicit_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.
        by_alias: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.
        by_name: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    # hide this frame in pytest tracebacks
    __tracebackhide__ = True

    # normalize `context`: default to the ambient validation context,
    # or build a ValidationContext from a plain mapping
    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

BinarizeKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for BinarizeDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `BinarizeDescr`",
  "properties": {
    "threshold": {
      "description": "The fixed threshold",
      "title": "Threshold",
      "type": "number"
    }
  },
  "required": [
    "threshold"
  ],
  "title": "model.v0_4.BinarizeKwargs",
  "type": "object"
}

Fields:

threshold pydantic-field ¤

threshold: float

The fixed threshold

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    """Return True if `item` names a model field of this class."""
    return item in type(self).model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    """Dict-style access: the value of model field `item`, else KeyError."""
    if item not in self.__class__.model_fields:
        raise KeyError(item)
    return getattr(self, item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    """Dict-style `get`: the value for `item` if known, else `default`."""
    if item in self:
        return self[item]
    return default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.
        by_alias: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.
        by_name: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    # hide this frame in pytest tracebacks
    __tracebackhide__ = True

    # normalize `context`: default to the ambient validation context,
    # or build a ValidationContext from a plain mapping
    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

CallableFromDepencency ¤

Bases: ValidatedStringWithInnerNode[CallableFromDepencencyNode]


              flowchart TD
              bioimageio.spec.model.v0_4.CallableFromDepencency[CallableFromDepencency]
              bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode[ValidatedStringWithInnerNode]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode --> bioimageio.spec.model.v0_4.CallableFromDepencency
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode
                



              click bioimageio.spec.model.v0_4.CallableFromDepencency href "" "bioimageio.spec.model.v0_4.CallableFromDepencency"
              click bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode href "" "bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

callable_name property ¤

callable_name

The callable Python identifier implemented in module module_name.

module_name property ¤

module_name

The Python module that implements callable_name.

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[
        str,
        StringConstraints(
            strip_whitespace=True, pattern="^.+\\..+$"
        ),
    ]
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    # validate as a plain `str` first, then run the result through `cls(...)`
    # as an after-validator (the subclass' __new__ normalizes/checks it)
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    """JSON schema of `cls.root_model`, titled and described after `cls`."""
    schema = cls.root_model.model_json_schema(mode=handler.mode)
    schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        schema["description"] = cls.__doc__
    return schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    # validate/normalize the raw value via the subclass' `root_model`;
    # `.root` unwraps the RootModel to the underlying validated string
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated  # stash the validated value on the instance
    # _after_validator() presumably lets subclasses post-process; its return
    # value is the instance handed to the caller -- confirm in ValidatedString
    return self._after_validator()

CallableFromDepencencyNode pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "module_name": {
      "description": "The Python module that implements **callable_name**.",
      "title": "Module Name",
      "type": "string"
    },
    "callable_name": {
      "description": "The callable Python identifier implemented in module **module_name**.",
      "minLength": 1,
      "title": "Identifier",
      "type": "string"
    }
  },
  "required": [
    "module_name",
    "callable_name"
  ],
  "title": "model.v0_4.CallableFromDepencencyNode",
  "type": "object"
}

Fields:

Validators:

callable_name pydantic-field ¤

callable_name: Identifier

The callable Python identifier implemented in module module_name.

module_name pydantic-field ¤

module_name: str

The Python module that implements callable_name.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.
        by_alias: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.
        by_name: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    # hide this frame in pytest tracebacks
    __tracebackhide__ = True

    # normalize `context`: default to the ambient validation context,
    # or build a ValidationContext from a plain mapping
    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

CallableFromFile ¤

Bases: ValidatedStringWithInnerNode[CallableFromFileNode]


              flowchart TD
              bioimageio.spec.model.v0_4.CallableFromFile[CallableFromFile]
              bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode[ValidatedStringWithInnerNode]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode --> bioimageio.spec.model.v0_4.CallableFromFile
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode
                



              click bioimageio.spec.model.v0_4.CallableFromFile href "" "bioimageio.spec.model.v0_4.CallableFromFile"
              click bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode href "" "bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

callable_name property ¤

callable_name

The callable Python identifier implemented in source_file.

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[
        str,
        StringConstraints(
            strip_whitespace=True, pattern="^.+:.+$"
        ),
    ]
]

source_file property ¤

source_file

The Python source file that implements callable_name.

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    # validate as a plain `str` first, then run the result through `cls(...)`
    # as an after-validator (the subclass' __new__ normalizes/checks it)
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    # schema of the inner root model (not of the bare `str`)
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    # title from the class name, without private-name underscores
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    # validate/normalize the raw value via the subclass' `root_model`;
    # `.root` unwraps the RootModel to the underlying validated string
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated  # stash the validated value on the instance
    # _after_validator() presumably lets subclasses post-process; its return
    # value is the instance handed to the caller -- confirm in ValidatedString
    return self._after_validator()

CallableFromFileNode pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source_file": {
      "anyOf": [
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        }
      ],
      "description": "The Python source file that implements **callable_name**.",
      "title": "Source File"
    },
    "callable_name": {
      "description": "The callable Python identifier implemented in **source_file**.",
      "minLength": 1,
      "title": "Identifier",
      "type": "string"
    }
  },
  "required": [
    "source_file",
    "callable_name"
  ],
  "title": "model.v0_4.CallableFromFileNode",
  "type": "object"
}

Fields:

callable_name pydantic-field ¤

callable_name: Identifier

The callable Python identifier implemented in source_file.

source_file pydantic-field ¤

source_file: Annotated[
    Union[RelativeFilePath, HttpUrl],
    Field(union_mode="left_to_right"),
    include_in_package,
]

The Python source file that implements callable_name.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.
        by_alias: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.
        by_name: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    # hide this frame in pytest tracebacks
    __tracebackhide__ = True

    # normalize `context`: default to the ambient validation context,
    # or build a ValidationContext from a plain mapping
    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ClipDescr pydantic-model ¤

Bases: ProcessingDescrBase

Clip tensor values to a range.

Set tensor values below ClipKwargs.min to ClipKwargs.min and above ClipKwargs.max to ClipKwargs.max.

Show JSON schema:
{
  "$defs": {
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Clip tensor values to a range.\n\nSet tensor values below `ClipKwargs.min` to `ClipKwargs.min`\nand above `ClipKwargs.max` to `ClipKwargs.max`.",
  "properties": {
    "name": {
      "const": "clip",
      "title": "Name",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ClipKwargs"
    }
  },
  "required": [
    "name",
    "kwargs"
  ],
  "title": "model.v0_4.ClipDescr",
  "type": "object"
}

Fields:

implemented_name class-attribute ¤

implemented_name: Literal['clip'] = 'clip'

kwargs pydantic-field ¤

kwargs: ClipKwargs

name pydantic-field ¤

name: Literal['clip'] = 'clip'

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    """Record `implemented_*` class attributes as fields to set explicitly.

    For every class attribute named `implemented_<field>` where `<field>` is
    a model field without a regular default, the attribute's value is stored
    in the read-only mapping `cls._fields_to_set_explicitly`.
    """
    explicit_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            # removeprefix instead of str.replace: replace would also drop
            # *interior* occurrences of "implemented_" in the attribute name
            field_name = attr.removeprefix("implemented_")
            if field_name not in cls.model_fields:
                continue

            # such fields must not also define a regular pydantic default
            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            explicit_fields[field_name] = getattr(cls, attr)

    cls._fields_to_set_explicitly = MappingProxyType(explicit_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.
        by_alias: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.
        by_name: Accepted for pydantic signature compatibility.
            NOTE(review): not forwarded to `super().model_validate` -- confirm intended.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    # hide this frame in pytest tracebacks
    __tracebackhide__ = True

    # normalize `context`: default to the ambient validation context,
    # or build a ValidationContext from a plain mapping
    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ClipKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ClipDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ClipDescr`",
  "properties": {
    "min": {
      "description": "minimum value for clipping",
      "title": "Min",
      "type": "number"
    },
    "max": {
      "description": "maximum value for clipping",
      "title": "Max",
      "type": "number"
    }
  },
  "required": [
    "min",
    "max"
  ],
  "title": "model.v0_4.ClipKwargs",
  "type": "object"
}

Fields:

max pydantic-field ¤

max: float

maximum value for clipping

min pydantic-field ¤

min: float

minimum value for clipping

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

Dependencies ¤

Bases: ValidatedStringWithInnerNode[DependenciesNode]


              flowchart TD
              bioimageio.spec.model.v0_4.Dependencies[Dependencies]
              bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode[ValidatedStringWithInnerNode]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode --> bioimageio.spec.model.v0_4.Dependencies
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode
                



              click bioimageio.spec.model.v0_4.Dependencies href "" "bioimageio.spec.model.v0_4.Dependencies"
              click bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode href "" "bioimageio.spec._internal.validated_string_with_inner_node.ValidatedStringWithInnerNode"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

file property ¤

file

Dependency file

manager property ¤

manager

Dependency manager

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[
        str,
        StringConstraints(
            strip_whitespace=True, pattern="^.+:.+$"
        ),
    ]
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated
    return self._after_validator()

DependenciesNode pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    }
  },
  "additionalProperties": false,
  "properties": {
    "manager": {
      "description": "Dependency manager",
      "examples": [
        "conda",
        "maven",
        "pip"
      ],
      "minLength": 1,
      "title": "Manager",
      "type": "string"
    },
    "file": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Dependency file",
      "title": "File"
    }
  },
  "required": [
    "manager",
    "file"
  ],
  "title": "model.v0_4.DependenciesNode",
  "type": "object"
}

Fields:

  • manager (Annotated[NotEmpty[str], Field(examples=['conda', 'maven', 'pip'])])
  • file (FileSource_)

file pydantic-field ¤

file: FileSource_

Dependency file

manager pydantic-field ¤

manager: Annotated[
    NotEmpty[str], Field(examples=["conda", "maven", "pip"])
]

Dependency manager

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ImplicitOutputShape pydantic-model ¤

Bases: Node

Output tensor shape depending on an input tensor shape. shape(output_tensor) = shape(input_tensor) * scale + 2 * offset

Show JSON schema:
{
  "additionalProperties": false,
  "description": "Output tensor shape depending on an input tensor shape.\n`shape(output_tensor) = shape(input_tensor) * scale + 2 * offset`",
  "properties": {
    "reference_tensor": {
      "description": "Name of the reference tensor.",
      "minLength": 1,
      "title": "TensorName",
      "type": "string"
    },
    "scale": {
      "description": "output_pix/input_pix for each dimension.\n'null' values indicate new dimensions, whose length is defined by 2*`offset`",
      "items": {
        "anyOf": [
          {
            "type": "number"
          },
          {
            "type": "null"
          }
        ]
      },
      "minItems": 1,
      "title": "Scale",
      "type": "array"
    },
    "offset": {
      "description": "Position of origin wrt to input.",
      "items": {
        "anyOf": [
          {
            "type": "integer"
          },
          {
            "multipleOf": 0.5,
            "type": "number"
          }
        ]
      },
      "minItems": 1,
      "title": "Offset",
      "type": "array"
    }
  },
  "required": [
    "reference_tensor",
    "scale",
    "offset"
  ],
  "title": "model.v0_4.ImplicitOutputShape",
  "type": "object"
}

Fields:

Validators:

offset pydantic-field ¤

offset: NotEmpty[
    List[Union[int, Annotated[float, MultipleOf(0.5)]]]
]

Position of origin wrt to input.

reference_tensor pydantic-field ¤

reference_tensor: TensorName

Name of the reference tensor.

scale pydantic-field ¤

scale: NotEmpty[List[Optional[float]]]

output_pix/input_pix for each dimension. 'null' values indicate new dimensions, whose length is defined by 2*offset

__len__ ¤

__len__() -> int
Source code in src/bioimageio/spec/model/v0_4.py
581
582
def __len__(self) -> int:
    return len(self.scale)

matching_lengths pydantic-validator ¤

matching_lengths() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
584
585
586
587
588
589
590
591
592
593
594
595
@model_validator(mode="after")
def matching_lengths(self) -> Self:
    if len(self.scale) != len(self.offset):
        raise ValueError(
            f"scale {self.scale} has to have same length as offset {self.offset}!"
        )
    # if we have an expanded dimension, make sure that its offset is not zero
    for sc, off in zip(self.scale, self.offset):
        if sc is None and not off:
            raise ValueError("`offset` must not be zero if `scale` is none/zero")

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

InputTensorDescr pydantic-model ¤

Bases: TensorDescrBase

Show JSON schema:
{
  "$defs": {
    "BinarizeDescr": {
      "additionalProperties": false,
      "description": "BinarizeDescr the tensor with a fixed `BinarizeKwargs.threshold`.\nValues above the threshold will be set to one, values below the threshold to zero.",
      "properties": {
        "name": {
          "const": "binarize",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/BinarizeKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.BinarizeDescr",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_4.BinarizeKwargs",
      "type": "object"
    },
    "ClipDescr": {
      "additionalProperties": false,
      "description": "Clip tensor values to a range.\n\nSet tensor values below `ClipKwargs.min` to `ClipKwargs.min`\nand above `ClipKwargs.max` to `ClipKwargs.max`.",
      "properties": {
        "name": {
          "const": "clip",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ClipKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ClipDescr",
      "type": "object"
    },
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    },
    "ParameterizedInputShape": {
      "additionalProperties": false,
      "description": "A sequence of valid shapes given by `shape_k = min + k * step for k in {0, 1, ...}`.",
      "properties": {
        "min": {
          "description": "The minimum input shape",
          "items": {
            "type": "integer"
          },
          "minItems": 1,
          "title": "Min",
          "type": "array"
        },
        "step": {
          "description": "The minimum shape change",
          "items": {
            "type": "integer"
          },
          "minItems": 1,
          "title": "Step",
          "type": "array"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_4.ParameterizedInputShape",
      "type": "object"
    },
    "ScaleLinearDescr": {
      "additionalProperties": false,
      "description": "Fixed linear scaling.",
      "properties": {
        "name": {
          "const": "scale_linear",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleLinearKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleLinearDescr",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to scale the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "title": "model.v0_4.ScaleLinearKwargs",
      "type": "object"
    },
    "ScaleRangeDescr": {
      "additionalProperties": false,
      "description": "Scale with percentiles.",
      "properties": {
        "name": {
          "const": "scale_range",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleRangeKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleRangeDescr",
      "type": "object"
    },
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "mode": {
          "description": "Mode for computing percentiles.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | compute for the entire dataset       |\n| per_sample  | compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example xy to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "min_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "ge": 0,
          "lt": 100,
          "title": "Min Percentile"
        },
        "max_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "gt": 1,
          "le": 100,
          "title": "Max Percentile"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "TensorName",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor name to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.\nFor a tensor in `outputs` only input tensor refereences are allowed if `mode: per_dataset`",
          "title": "Reference Tensor"
        }
      },
      "required": [
        "mode",
        "axes"
      ],
      "title": "model.v0_4.ScaleRangeKwargs",
      "type": "object"
    },
    "SigmoidDescr": {
      "additionalProperties": false,
      "description": "The logistic sigmoid funciton, a.k.a. expit function.",
      "properties": {
        "name": {
          "const": "sigmoid",
          "title": "Name",
          "type": "string"
        }
      },
      "required": [
        "name"
      ],
      "title": "model.v0_4.SigmoidDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract mean and divide by variance.",
      "properties": {
        "name": {
          "const": "zero_mean_unit_variance",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "mode": {
          "default": "fixed",
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n|   fixed     | Fixed values for mean and variance   |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "fixed",
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example `xy` to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "mean": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The mean value(s) to use for `mode: fixed`.\nFor example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`.",
          "examples": [
            [
              1.1,
              2.2,
              3.3
            ]
          ],
          "title": "Mean"
        },
        "std": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The standard deviation values to use for `mode: fixed`. Analogous to mean.",
          "examples": [
            [
              0.1,
              0.2,
              0.3
            ]
          ],
          "title": "Std"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "axes"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "name": {
      "description": "Tensor name. No duplicates are allowed.",
      "minLength": 1,
      "title": "TensorName",
      "type": "string"
    },
    "description": {
      "default": "",
      "title": "Description",
      "type": "string"
    },
    "axes": {
      "description": "Axes identifying characters. Same length and order as the axes in `shape`.\n| axis | description |\n| --- | --- |\n|  b  |  batch (groups multiple samples) |\n|  i  |  instance/index/element |\n|  t  |  time |\n|  c  |  channel |\n|  z  |  spatial dimension z |\n|  y  |  spatial dimension y |\n|  x  |  spatial dimension x |",
      "title": "Axes",
      "type": "string"
    },
    "data_range": {
      "anyOf": [
        {
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "type": "number"
            },
            {
              "type": "number"
            }
          ],
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\nIf not specified, the full data range that can be expressed in `data_type` is allowed.",
      "title": "Data Range"
    },
    "data_type": {
      "description": "For now an input tensor is expected to be given as `float32`.\nThe data flow in bioimage.io models is explained\n[in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit).",
      "enum": [
        "float32",
        "uint8",
        "uint16"
      ],
      "title": "Data Type",
      "type": "string"
    },
    "shape": {
      "anyOf": [
        {
          "items": {
            "type": "integer"
          },
          "type": "array"
        },
        {
          "$ref": "#/$defs/ParameterizedInputShape"
        }
      ],
      "description": "Specification of input tensor shape.",
      "examples": [
        [
          1,
          512,
          512,
          1
        ],
        {
          "min": [
            1,
            64,
            64,
            1
          ],
          "step": [
            0,
            32,
            32,
            0
          ]
        }
      ],
      "title": "Shape"
    },
    "preprocessing": {
      "description": "Description of how this input should be preprocessed.",
      "items": {
        "discriminator": {
          "mapping": {
            "binarize": "#/$defs/BinarizeDescr",
            "clip": "#/$defs/ClipDescr",
            "scale_linear": "#/$defs/ScaleLinearDescr",
            "scale_range": "#/$defs/ScaleRangeDescr",
            "sigmoid": "#/$defs/SigmoidDescr",
            "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
          },
          "propertyName": "name"
        },
        "oneOf": [
          {
            "$ref": "#/$defs/BinarizeDescr"
          },
          {
            "$ref": "#/$defs/ClipDescr"
          },
          {
            "$ref": "#/$defs/ScaleLinearDescr"
          },
          {
            "$ref": "#/$defs/SigmoidDescr"
          },
          {
            "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
          },
          {
            "$ref": "#/$defs/ScaleRangeDescr"
          }
        ]
      },
      "title": "Preprocessing",
      "type": "array"
    }
  },
  "required": [
    "name",
    "axes",
    "data_type",
    "shape"
  ],
  "title": "model.v0_4.InputTensorDescr",
  "type": "object"
}

Fields:

Validators:

axes pydantic-field ¤

axes: AxesStr

Axes identifying characters. Same length and order as the axes in shape. | axis | description | | --- | --- | | b | batch (groups multiple samples) | | i | instance/index/element | | t | time | | c | channel | | z | spatial dimension z | | y | spatial dimension y | | x | spatial dimension x |

data_range pydantic-field ¤

data_range: Optional[
    Tuple[
        Annotated[float, AllowInfNan(True)],
        Annotated[float, AllowInfNan(True)],
    ]
] = None

Tuple (minimum, maximum) specifying the allowed range of the data in this tensor. If not specified, the full data range that can be expressed in data_type is allowed.

data_type pydantic-field ¤

data_type: Literal['float32', 'uint8', 'uint16']

For now an input tensor is expected to be given as float32. The data flow in bioimage.io models is explained in this diagram.

description pydantic-field ¤

description: str = ''

name pydantic-field ¤

name: TensorName

Tensor name. No duplicates are allowed.

preprocessing pydantic-field ¤

preprocessing: List[PreprocessingDescr]

Description of how this input should be preprocessed.

shape pydantic-field ¤

shape: Annotated[
    Union[Sequence[int], ParameterizedInputShape],
    Field(
        examples=[
            (1, 512, 512, 1),
            dict(min=(1, 64, 64, 1), step=(0, 32, 32, 0)),
        ]
    ),
]

Specification of input tensor shape.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_preprocessing_kwargs pydantic-validator ¤

validate_preprocessing_kwargs() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
960
961
962
963
964
965
966
967
968
969
@model_validator(mode="after")
def validate_preprocessing_kwargs(self) -> Self:
    for p in self.preprocessing:
        kwargs_axes = p.kwargs.get("axes")
        if isinstance(kwargs_axes, str) and any(
            a not in self.axes for a in kwargs_axes
        ):
            raise ValueError("`kwargs.axes` needs to be subset of `axes`")

    return self

zero_batch_step_and_one_batch_size pydantic-validator ¤

zero_batch_step_and_one_batch_size() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
@model_validator(mode="after")
def zero_batch_step_and_one_batch_size(self) -> Self:
    bidx = self.axes.find("b")
    if bidx == -1:
        return self

    if isinstance(self.shape, ParameterizedInputShape):
        step = self.shape.step
        shape = self.shape.min
        if step[bidx] != 0:
            raise ValueError(
                "Input shape step has to be zero in the batch dimension (the batch"
                + " dimension can always be increased, but `step` should specify how"
                + " to increase the minimal shape to find the largest single batch"
                + " shape)"
            )
    else:
        shape = self.shape

    if shape[bidx] != 1:
        raise ValueError("Input shape has to be 1 in the batch dimension b.")

    return self

KerasHdf5WeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "tensorflow_version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "TensorFlow version used to create these weights"
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_4.KerasHdf5WeightsDescr",
  "type": "object"
}

Fields:

Validators:

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors: Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The weights file.

tensorflow_version pydantic-field ¤

tensorflow_version: Optional[Version] = None

TensorFlow version used to create these weights

type class-attribute ¤

type: WeightsFormat = 'keras_hdf5'

weights_format_name class-attribute ¤

weights_format_name: str = 'Keras HDF5'

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be it's own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

LinkedModel pydantic-model ¤

Bases: Node

Reference to a bioimage.io model.

Show JSON schema:
{
  "additionalProperties": false,
  "description": "Reference to a bioimage.io model.",
  "properties": {
    "id": {
      "description": "A valid model `id` from the bioimage.io collection.",
      "examples": [
        "affable-shark",
        "ambitious-sloth"
      ],
      "minLength": 1,
      "title": "ModelId",
      "type": "string"
    },
    "version_number": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "version number (n-th published version, not the semantic version) of linked model",
      "title": "Version Number"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_4.LinkedModel",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Annotated[
    ModelId,
    Field(examples=["affable-shark", "ambitious-sloth"]),
]

A valid model id from the bioimage.io collection.

version_number pydantic-field ¤

version_number: Optional[int] = None

version number (n-th published version, not the semantic version) of linked model

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ModelDescr pydantic-model ¤

Bases: GenericModelDescrBase

Specification of the fields used in a bioimage.io-compliant RDF that describes AI models with pretrained weights.

These fields are typically stored in a YAML file which we call a model resource description file (model RDF).

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "BadgeDescr": {
      "additionalProperties": false,
      "description": "A custom badge",
      "properties": {
        "label": {
          "description": "badge label to display on hover",
          "examples": [
            "Open in Colab"
          ],
          "title": "Label",
          "type": "string"
        },
        "icon": {
          "anyOf": [
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "badge icon (included in bioimage.io package if not a URL)",
          "examples": [
            "https://colab.research.google.com/assets/colab-badge.svg"
          ],
          "title": "Icon"
        },
        "url": {
          "description": "target URL",
          "examples": [
            "https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb"
          ],
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        }
      },
      "required": [
        "label",
        "url"
      ],
      "title": "generic.v0_2.BadgeDescr",
      "type": "object"
    },
    "BinarizeDescr": {
      "additionalProperties": false,
      "description": "BinarizeDescr the tensor with a fixed `BinarizeKwargs.threshold`.\nValues above the threshold will be set to one, values below the threshold to zero.",
      "properties": {
        "name": {
          "const": "binarize",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/BinarizeKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.BinarizeDescr",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_4.BinarizeKwargs",
      "type": "object"
    },
    "CiteEntry": {
      "additionalProperties": false,
      "properties": {
        "text": {
          "description": "free text description",
          "title": "Text",
          "type": "string"
        },
        "doi": {
          "anyOf": [
            {
              "description": "A digital object identifier, see https://www.doi.org/",
              "pattern": "^10\\.[0-9]{4}.+$",
              "title": "Doi",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A digital object identifier (DOI) is the prefered citation reference.\nSee https://www.doi.org/ for details. (alternatively specify `url`)",
          "title": "Doi"
        },
        "url": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL to cite (preferably specify a `doi` instead)",
          "title": "Url"
        }
      },
      "required": [
        "text"
      ],
      "title": "generic.v0_2.CiteEntry",
      "type": "object"
    },
    "ClipDescr": {
      "additionalProperties": false,
      "description": "Clip tensor values to a range.\n\nSet tensor values below `ClipKwargs.min` to `ClipKwargs.min`\nand above `ClipKwargs.max` to `ClipKwargs.max`.",
      "properties": {
        "name": {
          "const": "clip",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ClipKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ClipDescr",
      "type": "object"
    },
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    },
    "DatasetDescr": {
      "additionalProperties": false,
      "description": "A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage\nprocessing.",
      "properties": {
        "name": {
          "description": "A human-friendly name of the resource description",
          "minLength": 1,
          "title": "Name",
          "type": "string"
        },
        "description": {
          "title": "Description",
          "type": "string"
        },
        "covers": {
          "description": "Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff')",
          "examples": [
            [
              "cover.png"
            ]
          ],
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Covers",
          "type": "array"
        },
        "id_emoji": {
          "anyOf": [
            {
              "examples": [
                "\ud83e\udd88",
                "\ud83e\udda5"
              ],
              "maxLength": 1,
              "minLength": 1,
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "UTF-8 emoji for display alongside the `id`.",
          "title": "Id Emoji"
        },
        "authors": {
          "description": "The authors are the creators of the RDF and the primary points of contact.",
          "items": {
            "$ref": "#/$defs/Author"
          },
          "title": "Authors",
          "type": "array"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "file and other attachments"
        },
        "cite": {
          "description": "citations",
          "items": {
            "$ref": "#/$defs/CiteEntry"
          },
          "title": "Cite",
          "type": "array"
        },
        "config": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "A field for custom configuration that can contain any keys not present in the RDF spec.\nThis means you should not store, for example, a github repo URL in `config` since we already have the\n`git_repo` field defined in the spec.\nKeys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,\nit is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,\nfor example:\n```yaml\nconfig:\n    bioimageio:  # here is the domain name\n        my_custom_key: 3837283\n        another_key:\n            nested: value\n    imagej:       # config specific to ImageJ\n        macro_dir: path/to/macro/file\n```\nIf possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.\nYou may want to list linked files additionally under `attachments` to include them when packaging a resource\n(packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains\nan altered rdf.yaml file with local references to the downloaded files)",
          "examples": [
            {
              "bioimageio": {
                "another_key": {
                  "nested": "value"
                },
                "my_custom_key": 3837283
              },
              "imagej": {
                "macro_dir": "path/to/macro/file"
              }
            }
          ],
          "title": "Config",
          "type": "object"
        },
        "download_url": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL to download the resource from (deprecated)",
          "title": "Download Url"
        },
        "git_repo": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A URL to the Git repository where the resource is being developed.",
          "examples": [
            "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
          ],
          "title": "Git Repo"
        },
        "icon": {
          "anyOf": [
            {
              "maxLength": 2,
              "minLength": 1,
              "type": "string"
            },
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An icon for illustration",
          "title": "Icon"
        },
        "links": {
          "description": "IDs of other bioimage.io resources",
          "examples": [
            [
              "ilastik/ilastik",
              "deepimagej/deepimagej",
              "zero/notebook_u-net_3d_zerocostdl4mic"
            ]
          ],
          "items": {
            "type": "string"
          },
          "title": "Links",
          "type": "array"
        },
        "uploader": {
          "anyOf": [
            {
              "$ref": "#/$defs/Uploader"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The person who uploaded the model (e.g. to bioimage.io)"
        },
        "maintainers": {
          "description": "Maintainers of this resource.\nIf not specified `authors` are maintainers and at least some of them should specify their `github_user` name",
          "items": {
            "$ref": "#/$defs/Maintainer"
          },
          "title": "Maintainers",
          "type": "array"
        },
        "rdf_source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from.\nDo not set this field in a YAML file.",
          "title": "Rdf Source"
        },
        "tags": {
          "description": "Associated tags",
          "examples": [
            [
              "unet2d",
              "pytorch",
              "nucleus",
              "segmentation",
              "dsb2018"
            ]
          ],
          "items": {
            "type": "string"
          },
          "title": "Tags",
          "type": "array"
        },
        "version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The version of the resource following SemVer 2.0."
        },
        "version_number": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "version number (n-th published version, not the semantic version)",
          "title": "Version Number"
        },
        "format_version": {
          "const": "0.2.4",
          "description": "The format version of this resource specification\n(not the `version` of the resource description)\nWhen creating a new resource always use the latest micro/patch version described here.\nThe `format_version` is important for any consumer software to understand how to parse the fields.",
          "title": "Format Version",
          "type": "string"
        },
        "badges": {
          "description": "badges associated with this resource",
          "items": {
            "$ref": "#/$defs/BadgeDescr"
          },
          "title": "Badges",
          "type": "array"
        },
        "documentation": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL or relative path to a markdown file with additional documentation.\nThe recommended documentation file name is `README.md`. An `.md` suffix is mandatory.",
          "examples": [
            "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
            "README.md"
          ],
          "title": "Documentation"
        },
        "license": {
          "anyOf": [
            {
              "enum": [
                "0BSD",
                "AAL",
                "Abstyles",
                "AdaCore-doc",
                "Adobe-2006",
                "Adobe-Display-PostScript",
                "Adobe-Glyph",
                "Adobe-Utopia",
                "ADSL",
                "AFL-1.1",
                "AFL-1.2",
                "AFL-2.0",
                "AFL-2.1",
                "AFL-3.0",
                "Afmparse",
                "AGPL-1.0-only",
                "AGPL-1.0-or-later",
                "AGPL-3.0-only",
                "AGPL-3.0-or-later",
                "Aladdin",
                "AMDPLPA",
                "AML",
                "AML-glslang",
                "AMPAS",
                "ANTLR-PD",
                "ANTLR-PD-fallback",
                "Apache-1.0",
                "Apache-1.1",
                "Apache-2.0",
                "APAFML",
                "APL-1.0",
                "App-s2p",
                "APSL-1.0",
                "APSL-1.1",
                "APSL-1.2",
                "APSL-2.0",
                "Arphic-1999",
                "Artistic-1.0",
                "Artistic-1.0-cl8",
                "Artistic-1.0-Perl",
                "Artistic-2.0",
                "ASWF-Digital-Assets-1.0",
                "ASWF-Digital-Assets-1.1",
                "Baekmuk",
                "Bahyph",
                "Barr",
                "bcrypt-Solar-Designer",
                "Beerware",
                "Bitstream-Charter",
                "Bitstream-Vera",
                "BitTorrent-1.0",
                "BitTorrent-1.1",
                "blessing",
                "BlueOak-1.0.0",
                "Boehm-GC",
                "Borceux",
                "Brian-Gladman-2-Clause",
                "Brian-Gladman-3-Clause",
                "BSD-1-Clause",
                "BSD-2-Clause",
                "BSD-2-Clause-Darwin",
                "BSD-2-Clause-Patent",
                "BSD-2-Clause-Views",
                "BSD-3-Clause",
                "BSD-3-Clause-acpica",
                "BSD-3-Clause-Attribution",
                "BSD-3-Clause-Clear",
                "BSD-3-Clause-flex",
                "BSD-3-Clause-HP",
                "BSD-3-Clause-LBNL",
                "BSD-3-Clause-Modification",
                "BSD-3-Clause-No-Military-License",
                "BSD-3-Clause-No-Nuclear-License",
                "BSD-3-Clause-No-Nuclear-License-2014",
                "BSD-3-Clause-No-Nuclear-Warranty",
                "BSD-3-Clause-Open-MPI",
                "BSD-3-Clause-Sun",
                "BSD-4-Clause",
                "BSD-4-Clause-Shortened",
                "BSD-4-Clause-UC",
                "BSD-4.3RENO",
                "BSD-4.3TAHOE",
                "BSD-Advertising-Acknowledgement",
                "BSD-Attribution-HPND-disclaimer",
                "BSD-Inferno-Nettverk",
                "BSD-Protection",
                "BSD-Source-beginning-file",
                "BSD-Source-Code",
                "BSD-Systemics",
                "BSD-Systemics-W3Works",
                "BSL-1.0",
                "BUSL-1.1",
                "bzip2-1.0.6",
                "C-UDA-1.0",
                "CAL-1.0",
                "CAL-1.0-Combined-Work-Exception",
                "Caldera",
                "Caldera-no-preamble",
                "CATOSL-1.1",
                "CC-BY-1.0",
                "CC-BY-2.0",
                "CC-BY-2.5",
                "CC-BY-2.5-AU",
                "CC-BY-3.0",
                "CC-BY-3.0-AT",
                "CC-BY-3.0-AU",
                "CC-BY-3.0-DE",
                "CC-BY-3.0-IGO",
                "CC-BY-3.0-NL",
                "CC-BY-3.0-US",
                "CC-BY-4.0",
                "CC-BY-NC-1.0",
                "CC-BY-NC-2.0",
                "CC-BY-NC-2.5",
                "CC-BY-NC-3.0",
                "CC-BY-NC-3.0-DE",
                "CC-BY-NC-4.0",
                "CC-BY-NC-ND-1.0",
                "CC-BY-NC-ND-2.0",
                "CC-BY-NC-ND-2.5",
                "CC-BY-NC-ND-3.0",
                "CC-BY-NC-ND-3.0-DE",
                "CC-BY-NC-ND-3.0-IGO",
                "CC-BY-NC-ND-4.0",
                "CC-BY-NC-SA-1.0",
                "CC-BY-NC-SA-2.0",
                "CC-BY-NC-SA-2.0-DE",
                "CC-BY-NC-SA-2.0-FR",
                "CC-BY-NC-SA-2.0-UK",
                "CC-BY-NC-SA-2.5",
                "CC-BY-NC-SA-3.0",
                "CC-BY-NC-SA-3.0-DE",
                "CC-BY-NC-SA-3.0-IGO",
                "CC-BY-NC-SA-4.0",
                "CC-BY-ND-1.0",
                "CC-BY-ND-2.0",
                "CC-BY-ND-2.5",
                "CC-BY-ND-3.0",
                "CC-BY-ND-3.0-DE",
                "CC-BY-ND-4.0",
                "CC-BY-SA-1.0",
                "CC-BY-SA-2.0",
                "CC-BY-SA-2.0-UK",
                "CC-BY-SA-2.1-JP",
                "CC-BY-SA-2.5",
                "CC-BY-SA-3.0",
                "CC-BY-SA-3.0-AT",
                "CC-BY-SA-3.0-DE",
                "CC-BY-SA-3.0-IGO",
                "CC-BY-SA-4.0",
                "CC-PDDC",
                "CC0-1.0",
                "CDDL-1.0",
                "CDDL-1.1",
                "CDL-1.0",
                "CDLA-Permissive-1.0",
                "CDLA-Permissive-2.0",
                "CDLA-Sharing-1.0",
                "CECILL-1.0",
                "CECILL-1.1",
                "CECILL-2.0",
                "CECILL-2.1",
                "CECILL-B",
                "CECILL-C",
                "CERN-OHL-1.1",
                "CERN-OHL-1.2",
                "CERN-OHL-P-2.0",
                "CERN-OHL-S-2.0",
                "CERN-OHL-W-2.0",
                "CFITSIO",
                "check-cvs",
                "checkmk",
                "ClArtistic",
                "Clips",
                "CMU-Mach",
                "CMU-Mach-nodoc",
                "CNRI-Jython",
                "CNRI-Python",
                "CNRI-Python-GPL-Compatible",
                "COIL-1.0",
                "Community-Spec-1.0",
                "Condor-1.1",
                "copyleft-next-0.3.0",
                "copyleft-next-0.3.1",
                "Cornell-Lossless-JPEG",
                "CPAL-1.0",
                "CPL-1.0",
                "CPOL-1.02",
                "Cronyx",
                "Crossword",
                "CrystalStacker",
                "CUA-OPL-1.0",
                "Cube",
                "curl",
                "D-FSL-1.0",
                "DEC-3-Clause",
                "diffmark",
                "DL-DE-BY-2.0",
                "DL-DE-ZERO-2.0",
                "DOC",
                "Dotseqn",
                "DRL-1.0",
                "DRL-1.1",
                "DSDP",
                "dtoa",
                "dvipdfm",
                "ECL-1.0",
                "ECL-2.0",
                "EFL-1.0",
                "EFL-2.0",
                "eGenix",
                "Elastic-2.0",
                "Entessa",
                "EPICS",
                "EPL-1.0",
                "EPL-2.0",
                "ErlPL-1.1",
                "etalab-2.0",
                "EUDatagrid",
                "EUPL-1.0",
                "EUPL-1.1",
                "EUPL-1.2",
                "Eurosym",
                "Fair",
                "FBM",
                "FDK-AAC",
                "Ferguson-Twofish",
                "Frameworx-1.0",
                "FreeBSD-DOC",
                "FreeImage",
                "FSFAP",
                "FSFAP-no-warranty-disclaimer",
                "FSFUL",
                "FSFULLR",
                "FSFULLRWD",
                "FTL",
                "Furuseth",
                "fwlw",
                "GCR-docs",
                "GD",
                "GFDL-1.1-invariants-only",
                "GFDL-1.1-invariants-or-later",
                "GFDL-1.1-no-invariants-only",
                "GFDL-1.1-no-invariants-or-later",
                "GFDL-1.1-only",
                "GFDL-1.1-or-later",
                "GFDL-1.2-invariants-only",
                "GFDL-1.2-invariants-or-later",
                "GFDL-1.2-no-invariants-only",
                "GFDL-1.2-no-invariants-or-later",
                "GFDL-1.2-only",
                "GFDL-1.2-or-later",
                "GFDL-1.3-invariants-only",
                "GFDL-1.3-invariants-or-later",
                "GFDL-1.3-no-invariants-only",
                "GFDL-1.3-no-invariants-or-later",
                "GFDL-1.3-only",
                "GFDL-1.3-or-later",
                "Giftware",
                "GL2PS",
                "Glide",
                "Glulxe",
                "GLWTPL",
                "gnuplot",
                "GPL-1.0-only",
                "GPL-1.0-or-later",
                "GPL-2.0-only",
                "GPL-2.0-or-later",
                "GPL-3.0-only",
                "GPL-3.0-or-later",
                "Graphics-Gems",
                "gSOAP-1.3b",
                "gtkbook",
                "HaskellReport",
                "hdparm",
                "Hippocratic-2.1",
                "HP-1986",
                "HP-1989",
                "HPND",
                "HPND-DEC",
                "HPND-doc",
                "HPND-doc-sell",
                "HPND-export-US",
                "HPND-export-US-modify",
                "HPND-Fenneberg-Livingston",
                "HPND-INRIA-IMAG",
                "HPND-Kevlin-Henney",
                "HPND-Markus-Kuhn",
                "HPND-MIT-disclaimer",
                "HPND-Pbmplus",
                "HPND-sell-MIT-disclaimer-xserver",
                "HPND-sell-regexpr",
                "HPND-sell-variant",
                "HPND-sell-variant-MIT-disclaimer",
                "HPND-UC",
                "HTMLTIDY",
                "IBM-pibs",
                "ICU",
                "IEC-Code-Components-EULA",
                "IJG",
                "IJG-short",
                "ImageMagick",
                "iMatix",
                "Imlib2",
                "Info-ZIP",
                "Inner-Net-2.0",
                "Intel",
                "Intel-ACPI",
                "Interbase-1.0",
                "IPA",
                "IPL-1.0",
                "ISC",
                "ISC-Veillard",
                "Jam",
                "JasPer-2.0",
                "JPL-image",
                "JPNIC",
                "JSON",
                "Kastrup",
                "Kazlib",
                "Knuth-CTAN",
                "LAL-1.2",
                "LAL-1.3",
                "Latex2e",
                "Latex2e-translated-notice",
                "Leptonica",
                "LGPL-2.0-only",
                "LGPL-2.0-or-later",
                "LGPL-2.1-only",
                "LGPL-2.1-or-later",
                "LGPL-3.0-only",
                "LGPL-3.0-or-later",
                "LGPLLR",
                "Libpng",
                "libpng-2.0",
                "libselinux-1.0",
                "libtiff",
                "libutil-David-Nugent",
                "LiLiQ-P-1.1",
                "LiLiQ-R-1.1",
                "LiLiQ-Rplus-1.1",
                "Linux-man-pages-1-para",
                "Linux-man-pages-copyleft",
                "Linux-man-pages-copyleft-2-para",
                "Linux-man-pages-copyleft-var",
                "Linux-OpenIB",
                "LOOP",
                "LPD-document",
                "LPL-1.0",
                "LPL-1.02",
                "LPPL-1.0",
                "LPPL-1.1",
                "LPPL-1.2",
                "LPPL-1.3a",
                "LPPL-1.3c",
                "lsof",
                "Lucida-Bitmap-Fonts",
                "LZMA-SDK-9.11-to-9.20",
                "LZMA-SDK-9.22",
                "Mackerras-3-Clause",
                "Mackerras-3-Clause-acknowledgment",
                "magaz",
                "mailprio",
                "MakeIndex",
                "Martin-Birgmeier",
                "McPhee-slideshow",
                "metamail",
                "Minpack",
                "MirOS",
                "MIT",
                "MIT-0",
                "MIT-advertising",
                "MIT-CMU",
                "MIT-enna",
                "MIT-feh",
                "MIT-Festival",
                "MIT-Modern-Variant",
                "MIT-open-group",
                "MIT-testregex",
                "MIT-Wu",
                "MITNFA",
                "MMIXware",
                "Motosoto",
                "MPEG-SSG",
                "mpi-permissive",
                "mpich2",
                "MPL-1.0",
                "MPL-1.1",
                "MPL-2.0",
                "MPL-2.0-no-copyleft-exception",
                "mplus",
                "MS-LPL",
                "MS-PL",
                "MS-RL",
                "MTLL",
                "MulanPSL-1.0",
                "MulanPSL-2.0",
                "Multics",
                "Mup",
                "NAIST-2003",
                "NASA-1.3",
                "Naumen",
                "NBPL-1.0",
                "NCGL-UK-2.0",
                "NCSA",
                "Net-SNMP",
                "NetCDF",
                "Newsletr",
                "NGPL",
                "NICTA-1.0",
                "NIST-PD",
                "NIST-PD-fallback",
                "NIST-Software",
                "NLOD-1.0",
                "NLOD-2.0",
                "NLPL",
                "Nokia",
                "NOSL",
                "Noweb",
                "NPL-1.0",
                "NPL-1.1",
                "NPOSL-3.0",
                "NRL",
                "NTP",
                "NTP-0",
                "O-UDA-1.0",
                "OCCT-PL",
                "OCLC-2.0",
                "ODbL-1.0",
                "ODC-By-1.0",
                "OFFIS",
                "OFL-1.0",
                "OFL-1.0-no-RFN",
                "OFL-1.0-RFN",
                "OFL-1.1",
                "OFL-1.1-no-RFN",
                "OFL-1.1-RFN",
                "OGC-1.0",
                "OGDL-Taiwan-1.0",
                "OGL-Canada-2.0",
                "OGL-UK-1.0",
                "OGL-UK-2.0",
                "OGL-UK-3.0",
                "OGTSL",
                "OLDAP-1.1",
                "OLDAP-1.2",
                "OLDAP-1.3",
                "OLDAP-1.4",
                "OLDAP-2.0",
                "OLDAP-2.0.1",
                "OLDAP-2.1",
                "OLDAP-2.2",
                "OLDAP-2.2.1",
                "OLDAP-2.2.2",
                "OLDAP-2.3",
                "OLDAP-2.4",
                "OLDAP-2.5",
                "OLDAP-2.6",
                "OLDAP-2.7",
                "OLDAP-2.8",
                "OLFL-1.3",
                "OML",
                "OpenPBS-2.3",
                "OpenSSL",
                "OpenSSL-standalone",
                "OpenVision",
                "OPL-1.0",
                "OPL-UK-3.0",
                "OPUBL-1.0",
                "OSET-PL-2.1",
                "OSL-1.0",
                "OSL-1.1",
                "OSL-2.0",
                "OSL-2.1",
                "OSL-3.0",
                "PADL",
                "Parity-6.0.0",
                "Parity-7.0.0",
                "PDDL-1.0",
                "PHP-3.0",
                "PHP-3.01",
                "Pixar",
                "Plexus",
                "pnmstitch",
                "PolyForm-Noncommercial-1.0.0",
                "PolyForm-Small-Business-1.0.0",
                "PostgreSQL",
                "PSF-2.0",
                "psfrag",
                "psutils",
                "Python-2.0",
                "Python-2.0.1",
                "python-ldap",
                "Qhull",
                "QPL-1.0",
                "QPL-1.0-INRIA-2004",
                "radvd",
                "Rdisc",
                "RHeCos-1.1",
                "RPL-1.1",
                "RPL-1.5",
                "RPSL-1.0",
                "RSA-MD",
                "RSCPL",
                "Ruby",
                "SAX-PD",
                "SAX-PD-2.0",
                "Saxpath",
                "SCEA",
                "SchemeReport",
                "Sendmail",
                "Sendmail-8.23",
                "SGI-B-1.0",
                "SGI-B-1.1",
                "SGI-B-2.0",
                "SGI-OpenGL",
                "SGP4",
                "SHL-0.5",
                "SHL-0.51",
                "SimPL-2.0",
                "SISSL",
                "SISSL-1.2",
                "SL",
                "Sleepycat",
                "SMLNJ",
                "SMPPL",
                "SNIA",
                "snprintf",
                "softSurfer",
                "Soundex",
                "Spencer-86",
                "Spencer-94",
                "Spencer-99",
                "SPL-1.0",
                "ssh-keyscan",
                "SSH-OpenSSH",
                "SSH-short",
                "SSLeay-standalone",
                "SSPL-1.0",
                "SugarCRM-1.1.3",
                "Sun-PPP",
                "SunPro",
                "SWL",
                "swrule",
                "Symlinks",
                "TAPR-OHL-1.0",
                "TCL",
                "TCP-wrappers",
                "TermReadKey",
                "TGPPL-1.0",
                "TMate",
                "TORQUE-1.1",
                "TOSL",
                "TPDL",
                "TPL-1.0",
                "TTWL",
                "TTYP0",
                "TU-Berlin-1.0",
                "TU-Berlin-2.0",
                "UCAR",
                "UCL-1.0",
                "ulem",
                "UMich-Merit",
                "Unicode-3.0",
                "Unicode-DFS-2015",
                "Unicode-DFS-2016",
                "Unicode-TOU",
                "UnixCrypt",
                "Unlicense",
                "UPL-1.0",
                "URT-RLE",
                "Vim",
                "VOSTROM",
                "VSL-1.0",
                "W3C",
                "W3C-19980720",
                "W3C-20150513",
                "w3m",
                "Watcom-1.0",
                "Widget-Workshop",
                "Wsuipa",
                "WTFPL",
                "X11",
                "X11-distribute-modifications-variant",
                "Xdebug-1.03",
                "Xerox",
                "Xfig",
                "XFree86-1.1",
                "xinetd",
                "xkeyboard-config-Zinoviev",
                "xlock",
                "Xnet",
                "xpp",
                "XSkat",
                "YPL-1.0",
                "YPL-1.1",
                "Zed",
                "Zeeff",
                "Zend-2.0",
                "Zimbra-1.3",
                "Zimbra-1.4",
                "Zlib",
                "zlib-acknowledgement",
                "ZPL-1.1",
                "ZPL-2.0",
                "ZPL-2.1"
              ],
              "title": "LicenseId",
              "type": "string"
            },
            {
              "enum": [
                "AGPL-1.0",
                "AGPL-3.0",
                "BSD-2-Clause-FreeBSD",
                "BSD-2-Clause-NetBSD",
                "bzip2-1.0.5",
                "eCos-2.0",
                "GFDL-1.1",
                "GFDL-1.2",
                "GFDL-1.3",
                "GPL-1.0",
                "GPL-1.0+",
                "GPL-2.0",
                "GPL-2.0+",
                "GPL-2.0-with-autoconf-exception",
                "GPL-2.0-with-bison-exception",
                "GPL-2.0-with-classpath-exception",
                "GPL-2.0-with-font-exception",
                "GPL-2.0-with-GCC-exception",
                "GPL-3.0",
                "GPL-3.0+",
                "GPL-3.0-with-autoconf-exception",
                "GPL-3.0-with-GCC-exception",
                "LGPL-2.0",
                "LGPL-2.0+",
                "LGPL-2.1",
                "LGPL-2.1+",
                "LGPL-3.0",
                "LGPL-3.0+",
                "Nunit",
                "StandardML-NJ",
                "wxWindows"
              ],
              "title": "DeprecatedLicenseId",
              "type": "string"
            },
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A [SPDX license identifier](https://spdx.org/licenses/).\nWe do not support custom license beyond the SPDX license list, if you need that please\n[open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose\n) to discuss your intentions with the community.",
          "examples": [
            "CC0-1.0",
            "MIT",
            "BSD-2-Clause"
          ],
          "title": "License"
        },
        "type": {
          "const": "dataset",
          "title": "Type",
          "type": "string"
        },
        "id": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "DatasetId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "bioimage.io-wide unique resource identifier\nassigned by bioimage.io; version **un**specific.",
          "title": "Id"
        },
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "\"URL to the source of the dataset.",
          "title": "Source"
        }
      },
      "required": [
        "name",
        "description",
        "format_version",
        "type"
      ],
      "title": "dataset 0.2.4",
      "type": "object"
    },
    "Datetime": {
      "description": "Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format\nwith a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).",
      "format": "date-time",
      "title": "Datetime",
      "type": "string"
    },
    "ImplicitOutputShape": {
      "additionalProperties": false,
      "description": "Output tensor shape depending on an input tensor shape.\n`shape(output_tensor) = shape(input_tensor) * scale + 2 * offset`",
      "properties": {
        "reference_tensor": {
          "description": "Name of the reference tensor.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "scale": {
          "description": "output_pix/input_pix for each dimension.\n'null' values indicate new dimensions, whose length is defined by 2*`offset`",
          "items": {
            "anyOf": [
              {
                "type": "number"
              },
              {
                "type": "null"
              }
            ]
          },
          "minItems": 1,
          "title": "Scale",
          "type": "array"
        },
        "offset": {
          "description": "Position of origin wrt to input.",
          "items": {
            "anyOf": [
              {
                "type": "integer"
              },
              {
                "multipleOf": 0.5,
                "type": "number"
              }
            ]
          },
          "minItems": 1,
          "title": "Offset",
          "type": "array"
        }
      },
      "required": [
        "reference_tensor",
        "scale",
        "offset"
      ],
      "title": "model.v0_4.ImplicitOutputShape",
      "type": "object"
    },
    "InputTensorDescr": {
      "additionalProperties": false,
      "properties": {
        "name": {
          "description": "Tensor name. No duplicates are allowed.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "description": {
          "default": "",
          "title": "Description",
          "type": "string"
        },
        "axes": {
          "description": "Axes identifying characters. Same length and order as the axes in `shape`.\n| axis | description |\n| --- | --- |\n|  b  |  batch (groups multiple samples) |\n|  i  |  instance/index/element |\n|  t  |  time |\n|  c  |  channel |\n|  z  |  spatial dimension z |\n|  y  |  spatial dimension y |\n|  x  |  spatial dimension x |",
          "title": "Axes",
          "type": "string"
        },
        "data_range": {
          "anyOf": [
            {
              "maxItems": 2,
              "minItems": 2,
              "prefixItems": [
                {
                  "type": "number"
                },
                {
                  "type": "number"
                }
              ],
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\nIf not specified, the full data range that can be expressed in `data_type` is allowed.",
          "title": "Data Range"
        },
        "data_type": {
          "description": "For now an input tensor is expected to be given as `float32`.\nThe data flow in bioimage.io models is explained\n[in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit).",
          "enum": [
            "float32",
            "uint8",
            "uint16"
          ],
          "title": "Data Type",
          "type": "string"
        },
        "shape": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "type": "array"
            },
            {
              "$ref": "#/$defs/ParameterizedInputShape"
            }
          ],
          "description": "Specification of input tensor shape.",
          "examples": [
            [
              1,
              512,
              512,
              1
            ],
            {
              "min": [
                1,
                64,
                64,
                1
              ],
              "step": [
                0,
                32,
                32,
                0
              ]
            }
          ],
          "title": "Shape"
        },
        "preprocessing": {
          "description": "Description of how this input should be preprocessed.",
          "items": {
            "discriminator": {
              "mapping": {
                "binarize": "#/$defs/BinarizeDescr",
                "clip": "#/$defs/ClipDescr",
                "scale_linear": "#/$defs/ScaleLinearDescr",
                "scale_range": "#/$defs/ScaleRangeDescr",
                "sigmoid": "#/$defs/SigmoidDescr",
                "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
              },
              "propertyName": "name"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BinarizeDescr"
              },
              {
                "$ref": "#/$defs/ClipDescr"
              },
              {
                "$ref": "#/$defs/ScaleLinearDescr"
              },
              {
                "$ref": "#/$defs/SigmoidDescr"
              },
              {
                "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
              },
              {
                "$ref": "#/$defs/ScaleRangeDescr"
              }
            ]
          },
          "title": "Preprocessing",
          "type": "array"
        }
      },
      "required": [
        "name",
        "axes",
        "data_type",
        "shape"
      ],
      "title": "model.v0_4.InputTensorDescr",
      "type": "object"
    },
    "KerasHdf5WeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "tensorflow_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "TensorFlow version used to create these weights"
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.KerasHdf5WeightsDescr",
      "type": "object"
    },
    "LinkedDataset": {
      "additionalProperties": false,
      "description": "Reference to a bioimage.io dataset.",
      "properties": {
        "id": {
          "description": "A valid dataset `id` from the bioimage.io collection.",
          "minLength": 1,
          "title": "DatasetId",
          "type": "string"
        },
        "version_number": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "version number (n-th published version, not the semantic version) of linked dataset",
          "title": "Version Number"
        }
      },
      "required": [
        "id"
      ],
      "title": "dataset.v0_2.LinkedDataset",
      "type": "object"
    },
    "LinkedModel": {
      "additionalProperties": false,
      "description": "Reference to a bioimage.io model.",
      "properties": {
        "id": {
          "description": "A valid model `id` from the bioimage.io collection.",
          "examples": [
            "affable-shark",
            "ambitious-sloth"
          ],
          "minLength": 1,
          "title": "ModelId",
          "type": "string"
        },
        "version_number": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "version number (n-th published version, not the semantic version) of linked model",
          "title": "Version Number"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_4.LinkedModel",
      "type": "object"
    },
    "Maintainer": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Name"
        },
        "github_user": {
          "title": "Github User",
          "type": "string"
        }
      },
      "required": [
        "github_user"
      ],
      "title": "generic.v0_2.Maintainer",
      "type": "object"
    },
    "OnnxWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "opset_version": {
          "anyOf": [
            {
              "minimum": 7,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "ONNX opset version",
          "title": "Opset Version"
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.OnnxWeightsDescr",
      "type": "object"
    },
    "OutputTensorDescr": {
      "additionalProperties": false,
      "properties": {
        "name": {
          "description": "Tensor name. No duplicates are allowed.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "description": {
          "default": "",
          "title": "Description",
          "type": "string"
        },
        "axes": {
          "description": "Axes identifying characters. Same length and order as the axes in `shape`.\n| axis | description |\n| --- | --- |\n|  b  |  batch (groups multiple samples) |\n|  i  |  instance/index/element |\n|  t  |  time |\n|  c  |  channel |\n|  z  |  spatial dimension z |\n|  y  |  spatial dimension y |\n|  x  |  spatial dimension x |",
          "title": "Axes",
          "type": "string"
        },
        "data_range": {
          "anyOf": [
            {
              "maxItems": 2,
              "minItems": 2,
              "prefixItems": [
                {
                  "type": "number"
                },
                {
                  "type": "number"
                }
              ],
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\nIf not specified, the full data range that can be expressed in `data_type` is allowed.",
          "title": "Data Range"
        },
        "data_type": {
          "description": "Data type.\nThe data flow in bioimage.io models is explained\n[in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit).",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "title": "Data Type",
          "type": "string"
        },
        "shape": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "type": "array"
            },
            {
              "$ref": "#/$defs/ImplicitOutputShape"
            }
          ],
          "description": "Output tensor shape.",
          "title": "Shape"
        },
        "halo": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The `halo` that should be cropped from the output tensor to avoid boundary effects.\nThe `halo` is to be cropped from both sides, i.e. `shape_after_crop = shape - 2 * halo`.\nTo document a `halo` that is already cropped by the model `shape.offset` has to be used instead.",
          "title": "Halo"
        },
        "postprocessing": {
          "description": "Description of how this output should be postprocessed.",
          "items": {
            "discriminator": {
              "mapping": {
                "binarize": "#/$defs/BinarizeDescr",
                "clip": "#/$defs/ClipDescr",
                "scale_linear": "#/$defs/ScaleLinearDescr",
                "scale_mean_variance": "#/$defs/ScaleMeanVarianceDescr",
                "scale_range": "#/$defs/ScaleRangeDescr",
                "sigmoid": "#/$defs/SigmoidDescr",
                "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
              },
              "propertyName": "name"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BinarizeDescr"
              },
              {
                "$ref": "#/$defs/ClipDescr"
              },
              {
                "$ref": "#/$defs/ScaleLinearDescr"
              },
              {
                "$ref": "#/$defs/SigmoidDescr"
              },
              {
                "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
              },
              {
                "$ref": "#/$defs/ScaleRangeDescr"
              },
              {
                "$ref": "#/$defs/ScaleMeanVarianceDescr"
              }
            ]
          },
          "title": "Postprocessing",
          "type": "array"
        }
      },
      "required": [
        "name",
        "axes",
        "data_type",
        "shape"
      ],
      "title": "model.v0_4.OutputTensorDescr",
      "type": "object"
    },
    "ParameterizedInputShape": {
      "additionalProperties": false,
      "description": "A sequence of valid shapes given by `shape_k = min + k * step for k in {0, 1, ...}`.",
      "properties": {
        "min": {
          "description": "The minimum input shape",
          "items": {
            "type": "integer"
          },
          "minItems": 1,
          "title": "Min",
          "type": "array"
        },
        "step": {
          "description": "The minimum shape change",
          "items": {
            "type": "integer"
          },
          "minItems": 1,
          "title": "Step",
          "type": "array"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_4.ParameterizedInputShape",
      "type": "object"
    },
    "PytorchStateDictWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "architecture": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "CallableFromFile",
              "type": "string"
            },
            {
              "pattern": "^.+\\..+$",
              "title": "CallableFromDepencency",
              "type": "string"
            }
          ],
          "description": "callable returning a torch.nn.Module instance.\nLocal implementation: `<relative path to file>:<identifier of implementation within the file>`.\nImplementation in a dependency: `<dependency-package>.<[dependency-module]>.<identifier>`.",
          "examples": [
            "my_function.py:MyNetworkClass",
            "my_module.submodule.get_my_model"
          ],
          "title": "Architecture"
        },
        "architecture_sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The SHA256 of the architecture source file, if the architecture is not defined in a module listed in `dependencies`\nYou can drag and drop your file to this\n[online tool](http://emn178.github.io/online-tools/sha256_checksum.html) to generate a SHA256 in your browser.\nOr you can generate a SHA256 checksum with Python's `hashlib`,\n[here is a codesnippet](https://gist.github.com/FynnBe/e64460463df89439cff218bbf59c1100).",
          "title": "Architecture Sha256"
        },
        "kwargs": {
          "additionalProperties": true,
          "description": "key word arguments for the `architecture` callable",
          "title": "Kwargs",
          "type": "object"
        },
        "pytorch_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the PyTorch library used.\nIf `depencencies` is specified it should include pytorch and the verison has to match.\n(`dependencies` overrules `pytorch_version`)"
        }
      },
      "required": [
        "source",
        "architecture"
      ],
      "title": "model.v0_4.PytorchStateDictWeightsDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "RunMode": {
      "additionalProperties": false,
      "properties": {
        "name": {
          "anyOf": [
            {
              "const": "deepimagej",
              "type": "string"
            },
            {
              "type": "string"
            }
          ],
          "description": "Run mode name",
          "title": "Name"
        },
        "kwargs": {
          "additionalProperties": true,
          "description": "Run mode specific key word arguments",
          "title": "Kwargs",
          "type": "object"
        }
      },
      "required": [
        "name"
      ],
      "title": "model.v0_4.RunMode",
      "type": "object"
    },
    "ScaleLinearDescr": {
      "additionalProperties": false,
      "description": "Fixed linear scaling.",
      "properties": {
        "name": {
          "const": "scale_linear",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleLinearKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleLinearDescr",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to scale the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "title": "model.v0_4.ScaleLinearKwargs",
      "type": "object"
    },
    "ScaleMeanVarianceDescr": {
      "additionalProperties": false,
      "description": "Scale the tensor s.t. its mean and variance match a reference tensor.",
      "properties": {
        "name": {
          "const": "scale_mean_variance",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleMeanVarianceKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleMeanVarianceDescr",
      "type": "object"
    },
    "ScaleMeanVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleMeanVarianceDescr`",
      "properties": {
        "mode": {
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "reference_tensor": {
          "description": "Name of tensor to match.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to normalize the two image axes for 2d data jointly.\nDefault: scale all non-batch axes jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability:\n\"`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "mode",
        "reference_tensor"
      ],
      "title": "model.v0_4.ScaleMeanVarianceKwargs",
      "type": "object"
    },
    "ScaleRangeDescr": {
      "additionalProperties": false,
      "description": "Scale with percentiles.",
      "properties": {
        "name": {
          "const": "scale_range",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleRangeKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleRangeDescr",
      "type": "object"
    },
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "mode": {
          "description": "Mode for computing percentiles.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | compute for the entire dataset       |\n| per_sample  | compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example xy to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "min_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "ge": 0,
          "lt": 100,
          "title": "Min Percentile"
        },
        "max_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "gt": 1,
          "le": 100,
          "title": "Max Percentile"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "TensorName",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor name to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.\nFor a tensor in `outputs` only input tensor refereences are allowed if `mode: per_dataset`",
          "title": "Reference Tensor"
        }
      },
      "required": [
        "mode",
        "axes"
      ],
      "title": "model.v0_4.ScaleRangeKwargs",
      "type": "object"
    },
    "SigmoidDescr": {
      "additionalProperties": false,
      "description": "The logistic sigmoid funciton, a.k.a. expit function.",
      "properties": {
        "name": {
          "const": "sigmoid",
          "title": "Name",
          "type": "string"
        }
      },
      "required": [
        "name"
      ],
      "title": "model.v0_4.SigmoidDescr",
      "type": "object"
    },
    "TensorflowJsWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "tensorflow_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the TensorFlow library used."
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.TensorflowJsWeightsDescr",
      "type": "object"
    },
    "TensorflowSavedModelBundleWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "tensorflow_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the TensorFlow library used."
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.TensorflowSavedModelBundleWeightsDescr",
      "type": "object"
    },
    "TorchscriptWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "pytorch_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the PyTorch library used."
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.TorchscriptWeightsDescr",
      "type": "object"
    },
    "Uploader": {
      "additionalProperties": false,
      "properties": {
        "email": {
          "description": "Email",
          "format": "email",
          "title": "Email",
          "type": "string"
        },
        "name": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "name",
          "title": "Name"
        }
      },
      "required": [
        "email"
      ],
      "title": "generic.v0_2.Uploader",
      "type": "object"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    },
    "WeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "keras_hdf5": {
          "anyOf": [
            {
              "$ref": "#/$defs/KerasHdf5WeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "onnx": {
          "anyOf": [
            {
              "$ref": "#/$defs/OnnxWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "pytorch_state_dict": {
          "anyOf": [
            {
              "$ref": "#/$defs/PytorchStateDictWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "tensorflow_js": {
          "anyOf": [
            {
              "$ref": "#/$defs/TensorflowJsWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "tensorflow_saved_model_bundle": {
          "anyOf": [
            {
              "$ref": "#/$defs/TensorflowSavedModelBundleWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "torchscript": {
          "anyOf": [
            {
              "$ref": "#/$defs/TorchscriptWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        }
      },
      "title": "model.v0_4.WeightsDescr",
      "type": "object"
    },
    "YamlValue": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "format": "date",
          "type": "string"
        },
        {
          "format": "date-time",
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        },
        {
          "type": "string"
        },
        {
          "items": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "array"
        },
        {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "object"
        },
        {
          "type": "null"
        }
      ]
    },
    "ZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract mean and divide by variance.",
      "properties": {
        "name": {
          "const": "zero_mean_unit_variance",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "mode": {
          "default": "fixed",
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n|   fixed     | Fixed values for mean and variance   |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "fixed",
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example `xy` to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "mean": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The mean value(s) to use for `mode: fixed`.\nFor example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`.",
          "examples": [
            [
              1.1,
              2.2,
              3.3
            ]
          ],
          "title": "Mean"
        },
        "std": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The standard deviation values to use for `mode: fixed`. Analogous to mean.",
          "examples": [
            [
              0.1,
              0.2,
              0.3
            ]
          ],
          "title": "Std"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "axes"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Specification of the fields used in a bioimage.io-compliant RDF that describes AI models with pretrained weights.\n\nThese fields are typically stored in a YAML file which we call a model resource description file (model RDF).",
  "properties": {
    "name": {
      "description": "A human-readable name of this model.\nIt should be no longer than 64 characters and only contain letter, number, underscore, minus or space characters.",
      "minLength": 1,
      "title": "Name",
      "type": "string"
    },
    "description": {
      "title": "Description",
      "type": "string"
    },
    "covers": {
      "description": "Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff')",
      "examples": [
        [
          "cover.png"
        ]
      ],
      "items": {
        "anyOf": [
          {
            "description": "A URL with the HTTP or HTTPS scheme.",
            "format": "uri",
            "maxLength": 2083,
            "minLength": 1,
            "title": "HttpUrl",
            "type": "string"
          },
          {
            "$ref": "#/$defs/RelativeFilePath"
          },
          {
            "format": "file-path",
            "title": "FilePath",
            "type": "string"
          }
        ]
      },
      "title": "Covers",
      "type": "array"
    },
    "id_emoji": {
      "anyOf": [
        {
          "examples": [
            "\ud83e\udd88",
            "\ud83e\udda5"
          ],
          "maxLength": 1,
          "minLength": 1,
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "UTF-8 emoji for display alongside the `id`.",
      "title": "Id Emoji"
    },
    "authors": {
      "description": "The authors are the creators of the model RDF and the primary points of contact.",
      "items": {
        "$ref": "#/$defs/Author"
      },
      "minItems": 1,
      "title": "Authors",
      "type": "array"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "file and other attachments"
    },
    "cite": {
      "description": "citations",
      "items": {
        "$ref": "#/$defs/CiteEntry"
      },
      "title": "Cite",
      "type": "array"
    },
    "config": {
      "additionalProperties": {
        "$ref": "#/$defs/YamlValue"
      },
      "description": "A field for custom configuration that can contain any keys not present in the RDF spec.\nThis means you should not store, for example, a github repo URL in `config` since we already have the\n`git_repo` field defined in the spec.\nKeys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,\nit is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,\nfor example:\n```yaml\nconfig:\n    bioimageio:  # here is the domain name\n        my_custom_key: 3837283\n        another_key:\n            nested: value\n    imagej:       # config specific to ImageJ\n        macro_dir: path/to/macro/file\n```\nIf possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.\nYou may want to list linked files additionally under `attachments` to include them when packaging a resource\n(packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains\nan altered rdf.yaml file with local references to the downloaded files)",
      "examples": [
        {
          "bioimageio": {
            "another_key": {
              "nested": "value"
            },
            "my_custom_key": 3837283
          },
          "imagej": {
            "macro_dir": "path/to/macro/file"
          }
        }
      ],
      "title": "Config",
      "type": "object"
    },
    "download_url": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "URL to download the resource from (deprecated)",
      "title": "Download Url"
    },
    "git_repo": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A URL to the Git repository where the resource is being developed.",
      "examples": [
        "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
      ],
      "title": "Git Repo"
    },
    "icon": {
      "anyOf": [
        {
          "maxLength": 2,
          "minLength": 1,
          "type": "string"
        },
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "An icon for illustration",
      "title": "Icon"
    },
    "links": {
      "description": "IDs of other bioimage.io resources",
      "examples": [
        [
          "ilastik/ilastik",
          "deepimagej/deepimagej",
          "zero/notebook_u-net_3d_zerocostdl4mic"
        ]
      ],
      "items": {
        "type": "string"
      },
      "title": "Links",
      "type": "array"
    },
    "uploader": {
      "anyOf": [
        {
          "$ref": "#/$defs/Uploader"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The person who uploaded the model (e.g. to bioimage.io)"
    },
    "maintainers": {
      "description": "Maintainers of this resource.\nIf not specified `authors` are maintainers and at least some of them should specify their `github_user` name",
      "items": {
        "$ref": "#/$defs/Maintainer"
      },
      "title": "Maintainers",
      "type": "array"
    },
    "rdf_source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from.\nDo not set this field in a YAML file.",
      "title": "Rdf Source"
    },
    "tags": {
      "description": "Associated tags",
      "examples": [
        [
          "unet2d",
          "pytorch",
          "nucleus",
          "segmentation",
          "dsb2018"
        ]
      ],
      "items": {
        "type": "string"
      },
      "title": "Tags",
      "type": "array"
    },
    "version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The version of the resource following SemVer 2.0."
    },
    "version_number": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "version number (n-th published version, not the semantic version)",
      "title": "Version Number"
    },
    "format_version": {
      "const": "0.4.10",
      "description": "Version of the bioimage.io model description specification used.\nWhen creating a new model always use the latest micro/patch version described here.\nThe `format_version` is important for any consumer software to understand how to parse the fields.",
      "title": "Format Version",
      "type": "string"
    },
    "type": {
      "const": "model",
      "description": "Specialized resource type 'model'",
      "title": "Type",
      "type": "string"
    },
    "id": {
      "anyOf": [
        {
          "minLength": 1,
          "title": "ModelId",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "bioimage.io-wide unique resource identifier\nassigned by bioimage.io; version **un**specific.",
      "title": "Id"
    },
    "documentation": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "URL or relative path to a markdown file with additional documentation.\nThe recommended documentation file name is `README.md`. An `.md` suffix is mandatory.\nThe documentation should include a '[#[#]]# Validation' (sub)section\nwith details on how to quantitatively validate the model on unseen data.",
      "examples": [
        "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
        "README.md"
      ],
      "title": "Documentation"
    },
    "inputs": {
      "description": "Describes the input tensors expected by this model.",
      "items": {
        "$ref": "#/$defs/InputTensorDescr"
      },
      "minItems": 1,
      "title": "Inputs",
      "type": "array"
    },
    "license": {
      "anyOf": [
        {
          "enum": [
            "0BSD",
            "AAL",
            "Abstyles",
            "AdaCore-doc",
            "Adobe-2006",
            "Adobe-Display-PostScript",
            "Adobe-Glyph",
            "Adobe-Utopia",
            "ADSL",
            "AFL-1.1",
            "AFL-1.2",
            "AFL-2.0",
            "AFL-2.1",
            "AFL-3.0",
            "Afmparse",
            "AGPL-1.0-only",
            "AGPL-1.0-or-later",
            "AGPL-3.0-only",
            "AGPL-3.0-or-later",
            "Aladdin",
            "AMDPLPA",
            "AML",
            "AML-glslang",
            "AMPAS",
            "ANTLR-PD",
            "ANTLR-PD-fallback",
            "Apache-1.0",
            "Apache-1.1",
            "Apache-2.0",
            "APAFML",
            "APL-1.0",
            "App-s2p",
            "APSL-1.0",
            "APSL-1.1",
            "APSL-1.2",
            "APSL-2.0",
            "Arphic-1999",
            "Artistic-1.0",
            "Artistic-1.0-cl8",
            "Artistic-1.0-Perl",
            "Artistic-2.0",
            "ASWF-Digital-Assets-1.0",
            "ASWF-Digital-Assets-1.1",
            "Baekmuk",
            "Bahyph",
            "Barr",
            "bcrypt-Solar-Designer",
            "Beerware",
            "Bitstream-Charter",
            "Bitstream-Vera",
            "BitTorrent-1.0",
            "BitTorrent-1.1",
            "blessing",
            "BlueOak-1.0.0",
            "Boehm-GC",
            "Borceux",
            "Brian-Gladman-2-Clause",
            "Brian-Gladman-3-Clause",
            "BSD-1-Clause",
            "BSD-2-Clause",
            "BSD-2-Clause-Darwin",
            "BSD-2-Clause-Patent",
            "BSD-2-Clause-Views",
            "BSD-3-Clause",
            "BSD-3-Clause-acpica",
            "BSD-3-Clause-Attribution",
            "BSD-3-Clause-Clear",
            "BSD-3-Clause-flex",
            "BSD-3-Clause-HP",
            "BSD-3-Clause-LBNL",
            "BSD-3-Clause-Modification",
            "BSD-3-Clause-No-Military-License",
            "BSD-3-Clause-No-Nuclear-License",
            "BSD-3-Clause-No-Nuclear-License-2014",
            "BSD-3-Clause-No-Nuclear-Warranty",
            "BSD-3-Clause-Open-MPI",
            "BSD-3-Clause-Sun",
            "BSD-4-Clause",
            "BSD-4-Clause-Shortened",
            "BSD-4-Clause-UC",
            "BSD-4.3RENO",
            "BSD-4.3TAHOE",
            "BSD-Advertising-Acknowledgement",
            "BSD-Attribution-HPND-disclaimer",
            "BSD-Inferno-Nettverk",
            "BSD-Protection",
            "BSD-Source-beginning-file",
            "BSD-Source-Code",
            "BSD-Systemics",
            "BSD-Systemics-W3Works",
            "BSL-1.0",
            "BUSL-1.1",
            "bzip2-1.0.6",
            "C-UDA-1.0",
            "CAL-1.0",
            "CAL-1.0-Combined-Work-Exception",
            "Caldera",
            "Caldera-no-preamble",
            "CATOSL-1.1",
            "CC-BY-1.0",
            "CC-BY-2.0",
            "CC-BY-2.5",
            "CC-BY-2.5-AU",
            "CC-BY-3.0",
            "CC-BY-3.0-AT",
            "CC-BY-3.0-AU",
            "CC-BY-3.0-DE",
            "CC-BY-3.0-IGO",
            "CC-BY-3.0-NL",
            "CC-BY-3.0-US",
            "CC-BY-4.0",
            "CC-BY-NC-1.0",
            "CC-BY-NC-2.0",
            "CC-BY-NC-2.5",
            "CC-BY-NC-3.0",
            "CC-BY-NC-3.0-DE",
            "CC-BY-NC-4.0",
            "CC-BY-NC-ND-1.0",
            "CC-BY-NC-ND-2.0",
            "CC-BY-NC-ND-2.5",
            "CC-BY-NC-ND-3.0",
            "CC-BY-NC-ND-3.0-DE",
            "CC-BY-NC-ND-3.0-IGO",
            "CC-BY-NC-ND-4.0",
            "CC-BY-NC-SA-1.0",
            "CC-BY-NC-SA-2.0",
            "CC-BY-NC-SA-2.0-DE",
            "CC-BY-NC-SA-2.0-FR",
            "CC-BY-NC-SA-2.0-UK",
            "CC-BY-NC-SA-2.5",
            "CC-BY-NC-SA-3.0",
            "CC-BY-NC-SA-3.0-DE",
            "CC-BY-NC-SA-3.0-IGO",
            "CC-BY-NC-SA-4.0",
            "CC-BY-ND-1.0",
            "CC-BY-ND-2.0",
            "CC-BY-ND-2.5",
            "CC-BY-ND-3.0",
            "CC-BY-ND-3.0-DE",
            "CC-BY-ND-4.0",
            "CC-BY-SA-1.0",
            "CC-BY-SA-2.0",
            "CC-BY-SA-2.0-UK",
            "CC-BY-SA-2.1-JP",
            "CC-BY-SA-2.5",
            "CC-BY-SA-3.0",
            "CC-BY-SA-3.0-AT",
            "CC-BY-SA-3.0-DE",
            "CC-BY-SA-3.0-IGO",
            "CC-BY-SA-4.0",
            "CC-PDDC",
            "CC0-1.0",
            "CDDL-1.0",
            "CDDL-1.1",
            "CDL-1.0",
            "CDLA-Permissive-1.0",
            "CDLA-Permissive-2.0",
            "CDLA-Sharing-1.0",
            "CECILL-1.0",
            "CECILL-1.1",
            "CECILL-2.0",
            "CECILL-2.1",
            "CECILL-B",
            "CECILL-C",
            "CERN-OHL-1.1",
            "CERN-OHL-1.2",
            "CERN-OHL-P-2.0",
            "CERN-OHL-S-2.0",
            "CERN-OHL-W-2.0",
            "CFITSIO",
            "check-cvs",
            "checkmk",
            "ClArtistic",
            "Clips",
            "CMU-Mach",
            "CMU-Mach-nodoc",
            "CNRI-Jython",
            "CNRI-Python",
            "CNRI-Python-GPL-Compatible",
            "COIL-1.0",
            "Community-Spec-1.0",
            "Condor-1.1",
            "copyleft-next-0.3.0",
            "copyleft-next-0.3.1",
            "Cornell-Lossless-JPEG",
            "CPAL-1.0",
            "CPL-1.0",
            "CPOL-1.02",
            "Cronyx",
            "Crossword",
            "CrystalStacker",
            "CUA-OPL-1.0",
            "Cube",
            "curl",
            "D-FSL-1.0",
            "DEC-3-Clause",
            "diffmark",
            "DL-DE-BY-2.0",
            "DL-DE-ZERO-2.0",
            "DOC",
            "Dotseqn",
            "DRL-1.0",
            "DRL-1.1",
            "DSDP",
            "dtoa",
            "dvipdfm",
            "ECL-1.0",
            "ECL-2.0",
            "EFL-1.0",
            "EFL-2.0",
            "eGenix",
            "Elastic-2.0",
            "Entessa",
            "EPICS",
            "EPL-1.0",
            "EPL-2.0",
            "ErlPL-1.1",
            "etalab-2.0",
            "EUDatagrid",
            "EUPL-1.0",
            "EUPL-1.1",
            "EUPL-1.2",
            "Eurosym",
            "Fair",
            "FBM",
            "FDK-AAC",
            "Ferguson-Twofish",
            "Frameworx-1.0",
            "FreeBSD-DOC",
            "FreeImage",
            "FSFAP",
            "FSFAP-no-warranty-disclaimer",
            "FSFUL",
            "FSFULLR",
            "FSFULLRWD",
            "FTL",
            "Furuseth",
            "fwlw",
            "GCR-docs",
            "GD",
            "GFDL-1.1-invariants-only",
            "GFDL-1.1-invariants-or-later",
            "GFDL-1.1-no-invariants-only",
            "GFDL-1.1-no-invariants-or-later",
            "GFDL-1.1-only",
            "GFDL-1.1-or-later",
            "GFDL-1.2-invariants-only",
            "GFDL-1.2-invariants-or-later",
            "GFDL-1.2-no-invariants-only",
            "GFDL-1.2-no-invariants-or-later",
            "GFDL-1.2-only",
            "GFDL-1.2-or-later",
            "GFDL-1.3-invariants-only",
            "GFDL-1.3-invariants-or-later",
            "GFDL-1.3-no-invariants-only",
            "GFDL-1.3-no-invariants-or-later",
            "GFDL-1.3-only",
            "GFDL-1.3-or-later",
            "Giftware",
            "GL2PS",
            "Glide",
            "Glulxe",
            "GLWTPL",
            "gnuplot",
            "GPL-1.0-only",
            "GPL-1.0-or-later",
            "GPL-2.0-only",
            "GPL-2.0-or-later",
            "GPL-3.0-only",
            "GPL-3.0-or-later",
            "Graphics-Gems",
            "gSOAP-1.3b",
            "gtkbook",
            "HaskellReport",
            "hdparm",
            "Hippocratic-2.1",
            "HP-1986",
            "HP-1989",
            "HPND",
            "HPND-DEC",
            "HPND-doc",
            "HPND-doc-sell",
            "HPND-export-US",
            "HPND-export-US-modify",
            "HPND-Fenneberg-Livingston",
            "HPND-INRIA-IMAG",
            "HPND-Kevlin-Henney",
            "HPND-Markus-Kuhn",
            "HPND-MIT-disclaimer",
            "HPND-Pbmplus",
            "HPND-sell-MIT-disclaimer-xserver",
            "HPND-sell-regexpr",
            "HPND-sell-variant",
            "HPND-sell-variant-MIT-disclaimer",
            "HPND-UC",
            "HTMLTIDY",
            "IBM-pibs",
            "ICU",
            "IEC-Code-Components-EULA",
            "IJG",
            "IJG-short",
            "ImageMagick",
            "iMatix",
            "Imlib2",
            "Info-ZIP",
            "Inner-Net-2.0",
            "Intel",
            "Intel-ACPI",
            "Interbase-1.0",
            "IPA",
            "IPL-1.0",
            "ISC",
            "ISC-Veillard",
            "Jam",
            "JasPer-2.0",
            "JPL-image",
            "JPNIC",
            "JSON",
            "Kastrup",
            "Kazlib",
            "Knuth-CTAN",
            "LAL-1.2",
            "LAL-1.3",
            "Latex2e",
            "Latex2e-translated-notice",
            "Leptonica",
            "LGPL-2.0-only",
            "LGPL-2.0-or-later",
            "LGPL-2.1-only",
            "LGPL-2.1-or-later",
            "LGPL-3.0-only",
            "LGPL-3.0-or-later",
            "LGPLLR",
            "Libpng",
            "libpng-2.0",
            "libselinux-1.0",
            "libtiff",
            "libutil-David-Nugent",
            "LiLiQ-P-1.1",
            "LiLiQ-R-1.1",
            "LiLiQ-Rplus-1.1",
            "Linux-man-pages-1-para",
            "Linux-man-pages-copyleft",
            "Linux-man-pages-copyleft-2-para",
            "Linux-man-pages-copyleft-var",
            "Linux-OpenIB",
            "LOOP",
            "LPD-document",
            "LPL-1.0",
            "LPL-1.02",
            "LPPL-1.0",
            "LPPL-1.1",
            "LPPL-1.2",
            "LPPL-1.3a",
            "LPPL-1.3c",
            "lsof",
            "Lucida-Bitmap-Fonts",
            "LZMA-SDK-9.11-to-9.20",
            "LZMA-SDK-9.22",
            "Mackerras-3-Clause",
            "Mackerras-3-Clause-acknowledgment",
            "magaz",
            "mailprio",
            "MakeIndex",
            "Martin-Birgmeier",
            "McPhee-slideshow",
            "metamail",
            "Minpack",
            "MirOS",
            "MIT",
            "MIT-0",
            "MIT-advertising",
            "MIT-CMU",
            "MIT-enna",
            "MIT-feh",
            "MIT-Festival",
            "MIT-Modern-Variant",
            "MIT-open-group",
            "MIT-testregex",
            "MIT-Wu",
            "MITNFA",
            "MMIXware",
            "Motosoto",
            "MPEG-SSG",
            "mpi-permissive",
            "mpich2",
            "MPL-1.0",
            "MPL-1.1",
            "MPL-2.0",
            "MPL-2.0-no-copyleft-exception",
            "mplus",
            "MS-LPL",
            "MS-PL",
            "MS-RL",
            "MTLL",
            "MulanPSL-1.0",
            "MulanPSL-2.0",
            "Multics",
            "Mup",
            "NAIST-2003",
            "NASA-1.3",
            "Naumen",
            "NBPL-1.0",
            "NCGL-UK-2.0",
            "NCSA",
            "Net-SNMP",
            "NetCDF",
            "Newsletr",
            "NGPL",
            "NICTA-1.0",
            "NIST-PD",
            "NIST-PD-fallback",
            "NIST-Software",
            "NLOD-1.0",
            "NLOD-2.0",
            "NLPL",
            "Nokia",
            "NOSL",
            "Noweb",
            "NPL-1.0",
            "NPL-1.1",
            "NPOSL-3.0",
            "NRL",
            "NTP",
            "NTP-0",
            "O-UDA-1.0",
            "OCCT-PL",
            "OCLC-2.0",
            "ODbL-1.0",
            "ODC-By-1.0",
            "OFFIS",
            "OFL-1.0",
            "OFL-1.0-no-RFN",
            "OFL-1.0-RFN",
            "OFL-1.1",
            "OFL-1.1-no-RFN",
            "OFL-1.1-RFN",
            "OGC-1.0",
            "OGDL-Taiwan-1.0",
            "OGL-Canada-2.0",
            "OGL-UK-1.0",
            "OGL-UK-2.0",
            "OGL-UK-3.0",
            "OGTSL",
            "OLDAP-1.1",
            "OLDAP-1.2",
            "OLDAP-1.3",
            "OLDAP-1.4",
            "OLDAP-2.0",
            "OLDAP-2.0.1",
            "OLDAP-2.1",
            "OLDAP-2.2",
            "OLDAP-2.2.1",
            "OLDAP-2.2.2",
            "OLDAP-2.3",
            "OLDAP-2.4",
            "OLDAP-2.5",
            "OLDAP-2.6",
            "OLDAP-2.7",
            "OLDAP-2.8",
            "OLFL-1.3",
            "OML",
            "OpenPBS-2.3",
            "OpenSSL",
            "OpenSSL-standalone",
            "OpenVision",
            "OPL-1.0",
            "OPL-UK-3.0",
            "OPUBL-1.0",
            "OSET-PL-2.1",
            "OSL-1.0",
            "OSL-1.1",
            "OSL-2.0",
            "OSL-2.1",
            "OSL-3.0",
            "PADL",
            "Parity-6.0.0",
            "Parity-7.0.0",
            "PDDL-1.0",
            "PHP-3.0",
            "PHP-3.01",
            "Pixar",
            "Plexus",
            "pnmstitch",
            "PolyForm-Noncommercial-1.0.0",
            "PolyForm-Small-Business-1.0.0",
            "PostgreSQL",
            "PSF-2.0",
            "psfrag",
            "psutils",
            "Python-2.0",
            "Python-2.0.1",
            "python-ldap",
            "Qhull",
            "QPL-1.0",
            "QPL-1.0-INRIA-2004",
            "radvd",
            "Rdisc",
            "RHeCos-1.1",
            "RPL-1.1",
            "RPL-1.5",
            "RPSL-1.0",
            "RSA-MD",
            "RSCPL",
            "Ruby",
            "SAX-PD",
            "SAX-PD-2.0",
            "Saxpath",
            "SCEA",
            "SchemeReport",
            "Sendmail",
            "Sendmail-8.23",
            "SGI-B-1.0",
            "SGI-B-1.1",
            "SGI-B-2.0",
            "SGI-OpenGL",
            "SGP4",
            "SHL-0.5",
            "SHL-0.51",
            "SimPL-2.0",
            "SISSL",
            "SISSL-1.2",
            "SL",
            "Sleepycat",
            "SMLNJ",
            "SMPPL",
            "SNIA",
            "snprintf",
            "softSurfer",
            "Soundex",
            "Spencer-86",
            "Spencer-94",
            "Spencer-99",
            "SPL-1.0",
            "ssh-keyscan",
            "SSH-OpenSSH",
            "SSH-short",
            "SSLeay-standalone",
            "SSPL-1.0",
            "SugarCRM-1.1.3",
            "Sun-PPP",
            "SunPro",
            "SWL",
            "swrule",
            "Symlinks",
            "TAPR-OHL-1.0",
            "TCL",
            "TCP-wrappers",
            "TermReadKey",
            "TGPPL-1.0",
            "TMate",
            "TORQUE-1.1",
            "TOSL",
            "TPDL",
            "TPL-1.0",
            "TTWL",
            "TTYP0",
            "TU-Berlin-1.0",
            "TU-Berlin-2.0",
            "UCAR",
            "UCL-1.0",
            "ulem",
            "UMich-Merit",
            "Unicode-3.0",
            "Unicode-DFS-2015",
            "Unicode-DFS-2016",
            "Unicode-TOU",
            "UnixCrypt",
            "Unlicense",
            "UPL-1.0",
            "URT-RLE",
            "Vim",
            "VOSTROM",
            "VSL-1.0",
            "W3C",
            "W3C-19980720",
            "W3C-20150513",
            "w3m",
            "Watcom-1.0",
            "Widget-Workshop",
            "Wsuipa",
            "WTFPL",
            "X11",
            "X11-distribute-modifications-variant",
            "Xdebug-1.03",
            "Xerox",
            "Xfig",
            "XFree86-1.1",
            "xinetd",
            "xkeyboard-config-Zinoviev",
            "xlock",
            "Xnet",
            "xpp",
            "XSkat",
            "YPL-1.0",
            "YPL-1.1",
            "Zed",
            "Zeeff",
            "Zend-2.0",
            "Zimbra-1.3",
            "Zimbra-1.4",
            "Zlib",
            "zlib-acknowledgement",
            "ZPL-1.1",
            "ZPL-2.0",
            "ZPL-2.1"
          ],
          "title": "LicenseId",
          "type": "string"
        },
        {
          "type": "string"
        }
      ],
      "description": "A [SPDX license identifier](https://spdx.org/licenses/).\nWe do notsupport custom license beyond the SPDX license list, if you need that please\n[open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose\n) to discuss your intentions with the community.",
      "examples": [
        "CC0-1.0",
        "MIT",
        "BSD-2-Clause"
      ],
      "title": "License"
    },
    "outputs": {
      "description": "Describes the output tensors.",
      "items": {
        "$ref": "#/$defs/OutputTensorDescr"
      },
      "minItems": 1,
      "title": "Outputs",
      "type": "array"
    },
    "packaged_by": {
      "description": "The persons that have packaged and uploaded this model.\nOnly required if those persons differ from the `authors`.",
      "items": {
        "$ref": "#/$defs/Author"
      },
      "title": "Packaged By",
      "type": "array"
    },
    "parent": {
      "anyOf": [
        {
          "$ref": "#/$defs/LinkedModel"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The model from which this model is derived, e.g. by fine-tuning the weights."
    },
    "run_mode": {
      "anyOf": [
        {
          "$ref": "#/$defs/RunMode"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Custom run mode for this model: for more complex prediction procedures like test time\ndata augmentation that currently cannot be expressed in the specification.\nNo standard run modes are defined yet."
    },
    "sample_inputs": {
      "description": "URLs/relative paths to sample inputs to illustrate possible inputs for the model,\nfor example stored as PNG or TIFF images.\nThe sample files primarily serve to inform a human user about an example use case",
      "items": {
        "anyOf": [
          {
            "description": "A URL with the HTTP or HTTPS scheme.",
            "format": "uri",
            "maxLength": 2083,
            "minLength": 1,
            "title": "HttpUrl",
            "type": "string"
          },
          {
            "$ref": "#/$defs/RelativeFilePath"
          },
          {
            "format": "file-path",
            "title": "FilePath",
            "type": "string"
          }
        ]
      },
      "title": "Sample Inputs",
      "type": "array"
    },
    "sample_outputs": {
      "description": "URLs/relative paths to sample outputs corresponding to the `sample_inputs`.",
      "items": {
        "anyOf": [
          {
            "description": "A URL with the HTTP or HTTPS scheme.",
            "format": "uri",
            "maxLength": 2083,
            "minLength": 1,
            "title": "HttpUrl",
            "type": "string"
          },
          {
            "$ref": "#/$defs/RelativeFilePath"
          },
          {
            "format": "file-path",
            "title": "FilePath",
            "type": "string"
          }
        ]
      },
      "title": "Sample Outputs",
      "type": "array"
    },
    "test_inputs": {
      "description": "Test input tensors compatible with the `inputs` description for a **single test case**.\nThis means if your model has more than one input, you should provide one URL/relative path for each input.\nEach test input should be a file with an ndarray in\n[numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).\nThe extension must be '.npy'.",
      "items": {
        "anyOf": [
          {
            "description": "A URL with the HTTP or HTTPS scheme.",
            "format": "uri",
            "maxLength": 2083,
            "minLength": 1,
            "title": "HttpUrl",
            "type": "string"
          },
          {
            "$ref": "#/$defs/RelativeFilePath"
          },
          {
            "format": "file-path",
            "title": "FilePath",
            "type": "string"
          }
        ]
      },
      "minItems": 1,
      "title": "Test Inputs",
      "type": "array"
    },
    "test_outputs": {
      "description": "Analog to `test_inputs`.",
      "items": {
        "anyOf": [
          {
            "description": "A URL with the HTTP or HTTPS scheme.",
            "format": "uri",
            "maxLength": 2083,
            "minLength": 1,
            "title": "HttpUrl",
            "type": "string"
          },
          {
            "$ref": "#/$defs/RelativeFilePath"
          },
          {
            "format": "file-path",
            "title": "FilePath",
            "type": "string"
          }
        ]
      },
      "minItems": 1,
      "title": "Test Outputs",
      "type": "array"
    },
    "timestamp": {
      "$ref": "#/$defs/Datetime"
    },
    "training_data": {
      "anyOf": [
        {
          "$ref": "#/$defs/LinkedDataset"
        },
        {
          "$ref": "#/$defs/DatasetDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The dataset used to train this model",
      "title": "Training Data"
    },
    "weights": {
      "$ref": "#/$defs/WeightsDescr",
      "description": "The weights for this model.\nWeights can be given for different formats, but should otherwise be equivalent.\nThe available weight formats determine which consumers can use this model."
    }
  },
  "required": [
    "name",
    "description",
    "authors",
    "format_version",
    "type",
    "documentation",
    "inputs",
    "license",
    "outputs",
    "test_inputs",
    "test_outputs",
    "timestamp",
    "weights"
  ],
  "title": "model 0.4.10",
  "type": "object"
}

Fields:

Validators:

attachments pydantic-field ¤

attachments: Optional[AttachmentsDescr] = None

file and other attachments

authors pydantic-field ¤

authors: NotEmpty[List[Author]]

The authors are the creators of the model RDF and the primary points of contact.

cite pydantic-field ¤

cite: List[CiteEntry]

citations

config pydantic-field ¤

config: Annotated[
    Dict[str, YamlValue],
    Field(
        examples=[
            dict(
                bioimageio={
                    my_custom_key: 3837283,
                    another_key: {nested: value},
                },
                imagej={
                    macro_dir: path / to / macro / file
                },
            )
        ]
    ),
]

A field for custom configuration that can contain any keys not present in the RDF spec. This means you should not store, for example, a github repo URL in config since we already have the git_repo field defined in the spec. Keys in config may be very specific to a tool or consumer software. To avoid conflicting definitions, it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name, for example:

config:
    bioimageio:  # here is the domain name
        my_custom_key: 3837283
        another_key:
            nested: value
    imagej:       # config specific to ImageJ
        macro_dir: path/to/macro/file
If possible, please use snake_case for keys in config. You may want to list linked files additionally under attachments to include them when packaging a resource (packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains an altered rdf.yaml file with local references to the downloaded files)

covers pydantic-field ¤

covers: List[FileSource_cover]

Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1.

description pydantic-field ¤

description: str

documentation pydantic-field ¤

documentation: Annotated[
    FileSource_,
    Field(
        examples=[
            "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
            "README.md",
        ]
    ),
]

URL or relative path to a markdown file with additional documentation. The recommended documentation file name is README.md. An .md suffix is mandatory. The documentation should include a '[#[#]]# Validation' (sub)section with details on how to quantitatively validate the model on unseen data.

download_url pydantic-field ¤

download_url: Optional[HttpUrl] = None

URL to download the resource from (deprecated)

file_name property ¤

file_name: Optional[FileName]

File name of the bioimageio.yaml file the description was loaded from.

format_version pydantic-field ¤

format_version: Literal['0.4.10'] = '0.4.10'

git_repo pydantic-field ¤

git_repo: Annotated[
    Optional[str],
    Field(
        examples=[
            "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
        ]
    ),
] = None

A URL to the Git repository where the resource is being developed.

icon pydantic-field ¤

icon: Union[
    Annotated[str, Len(min_length=1, max_length=2)],
    FileSource,
    None,
] = None

An icon for illustration

id pydantic-field ¤

id: Optional[ModelId] = None

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

id_emoji pydantic-field ¤

id_emoji: Optional[
    Annotated[
        str,
        Len(min_length=1, max_length=1),
        Field(examples=["🦈", "🦥"]),
    ]
] = None

UTF-8 emoji for display alongside the id.

implemented_format_version class-attribute ¤

implemented_format_version: Literal['0.4.10'] = '0.4.10'

implemented_format_version_tuple class-attribute ¤

implemented_format_version_tuple: Tuple[int, int, int]

implemented_type class-attribute ¤

implemented_type: Literal['model'] = 'model'

inputs pydantic-field ¤

inputs: NotEmpty[List[InputTensorDescr]]

Describes the input tensors expected by this model.

license pydantic-field ¤

license: Annotated[
    Union[LicenseId, str],
    warn(LicenseId, "Unknown license id '{value}'."),
    Field(examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
]

A SPDX license identifier. We do not support custom licenses beyond the SPDX license list; if you need that, please open a GitHub issue to discuss your intentions with the community.

links: Annotated[
    List[str],
    Field(
        examples=[
            (
                ilastik / ilastik,
                deepimagej / deepimagej,
                zero / notebook_u - net_3d_zerocostdl4mic,
            )
        ]
    ),
]

IDs of other bioimage.io resources

maintainers pydantic-field ¤

maintainers: List[Maintainer]

Maintainers of this resource. If not specified, authors are maintainers, and at least some of them should specify their github_user name.

name pydantic-field ¤

name: Annotated[
    str,
    MinLen(1),
    warn(
        MinLen(5), "Name shorter than 5 characters.", INFO
    ),
    warn(
        MaxLen(64), "Name longer than 64 characters.", INFO
    ),
]

A human-readable name of this model. It should be no longer than 64 characters and only contain letter, number, underscore, minus or space characters.

outputs pydantic-field ¤

outputs: NotEmpty[List[OutputTensorDescr]]

Describes the output tensors.

packaged_by pydantic-field ¤

packaged_by: List[Author]

The persons that have packaged and uploaded this model. Only required if those persons differ from the authors.

parent pydantic-field ¤

parent: Optional[LinkedModel] = None

The model from which this model is derived, e.g. by fine-tuning the weights.

rdf_source pydantic-field ¤

rdf_source: Optional[FileSource] = None

Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from. Do not set this field in a YAML file.

root property ¤

root: Union[RootHttpUrl, DirectoryPath, ZipFile]

The URL/Path prefix to resolve any relative paths with.

run_mode pydantic-field ¤

run_mode: Optional[RunMode] = None

Custom run mode for this model: for more complex prediction procedures like test time data augmentation that currently cannot be expressed in the specification. No standard run modes are defined yet.

sample_inputs pydantic-field ¤

sample_inputs: List[FileSource_]

URLs/relative paths to sample inputs to illustrate possible inputs for the model, for example stored as PNG or TIFF images. The sample files primarily serve to inform a human user about an example use case.

sample_outputs pydantic-field ¤

sample_outputs: List[FileSource_]

URLs/relative paths to sample outputs corresponding to the sample_inputs.

tags pydantic-field ¤

tags: Annotated[
    List[str],
    Field(
        examples=[
            (
                unet2d,
                pytorch,
                nucleus,
                segmentation,
                dsb2018,
            )
        ]
    ),
]

Associated tags

test_inputs pydantic-field ¤

test_inputs: NotEmpty[
    List[
        Annotated[
            FileSource_,
            WithSuffix(".npy", case_sensitive=True),
        ]
    ]
]

Test input tensors compatible with the inputs description for a single test case. This means if your model has more than one input, you should provide one URL/relative path for each input. Each test input should be a file with an ndarray in numpy.lib file format. The extension must be '.npy'.

test_outputs pydantic-field ¤

test_outputs: NotEmpty[
    List[
        Annotated[
            FileSource_,
            WithSuffix(".npy", case_sensitive=True),
        ]
    ]
]

Analog to test_inputs.

timestamp pydantic-field ¤

timestamp: Datetime

Timestamp in ISO 8601 format with a few restrictions listed here.

training_data pydantic-field ¤

training_data: Union[LinkedDataset, DatasetDescr, None] = (
    None
)

The dataset used to train this model

type pydantic-field ¤

type: Literal['model'] = 'model'

uploader pydantic-field ¤

uploader: Optional[Uploader] = None

The person who uploaded the model (e.g. to bioimage.io)

validation_summary property ¤

validation_summary: ValidationSummary

version pydantic-field ¤

version: Optional[Version] = None

The version of the resource following SemVer 2.0.

version_number pydantic-field ¤

version_number: Optional[int] = None

version number (n-th published version, not the semantic version)

weights pydantic-field ¤

The weights for this model. Weights can be given for different formats, but should otherwise be equivalent. The available weight formats determine which consumers can use this model.

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any)
Source code in src/bioimageio/spec/_internal/common_nodes.py
199
200
201
202
203
204
205
206
207
208
209
210
211
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any):
    super().__pydantic_init_subclass__(**kwargs)
    # set classvar implemented_format_version_tuple
    if "format_version" in cls.model_fields:
        if "." not in cls.implemented_format_version:
            cls.implemented_format_version_tuple = (0, 0, 0)
        else:
            fv_tuple = get_format_version_tuple(cls.implemented_format_version)
            assert fv_tuple is not None, (
                f"failed to cast '{cls.implemented_format_version}' to tuple"
            )
            cls.implemented_format_version_tuple = fv_tuple

accept_author_strings classmethod ¤

accept_author_strings(
    authors: Union[Any, Sequence[Any]],
) -> Any

we unofficially accept strings as author entries

Source code in src/bioimageio/spec/generic/v0_2.py
245
246
247
248
249
250
251
252
253
254
255
@field_validator("authors", mode="before")
@classmethod
def accept_author_strings(cls, authors: Union[Any, Sequence[Any]]) -> Any:
    """we unofficially accept strings as author entries"""
    if is_sequence(authors):
        authors = [{"name": a} if isinstance(a, str) else a for a in authors]

    if not authors:
        issue_warning("missing", value=authors, field="authors")

    return authors

get_input_test_arrays ¤

get_input_test_arrays() -> List[NDArray[Any]]
Source code in src/bioimageio/spec/model/v0_4.py
1352
1353
1354
1355
def get_input_test_arrays(self) -> List[NDArray[Any]]:
    data = [load_array(ipt) for ipt in self.test_inputs]
    assert all(isinstance(d, np.ndarray) for d in data)
    return data

get_output_test_arrays ¤

get_output_test_arrays() -> List[NDArray[Any]]
Source code in src/bioimageio/spec/model/v0_4.py
1357
1358
1359
1360
def get_output_test_arrays(self) -> List[NDArray[Any]]:
    data = [load_array(out) for out in self.test_outputs]
    assert all(isinstance(d, np.ndarray) for d in data)
    return data

get_package_content ¤

get_package_content() -> Dict[
    FileName, Union[FileDescr, BioimageioYamlContent]
]

Returns package content without creating the package.

Source code in src/bioimageio/spec/_internal/common_nodes.py
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
def get_package_content(
    self,
) -> Dict[FileName, Union[FileDescr, BioimageioYamlContent]]:
    """Returns package content without creating the package."""
    content: Dict[FileName, FileDescr] = {}
    with PackagingContext(
        bioimageio_yaml_file_name=BIOIMAGEIO_YAML,
        file_sources=content,
    ):
        rdf_content: BioimageioYamlContent = self.model_dump(
            mode="json", exclude_unset=True
        )

    _ = rdf_content.pop("rdf_source", None)

    return {**content, BIOIMAGEIO_YAML: rdf_content}

ignore_url_parent pydantic-validator ¤

ignore_url_parent(parent: Any)
Source code in src/bioimageio/spec/model/v0_4.py
1292
1293
1294
1295
1296
1297
1298
1299
@field_validator("parent", mode="before")
@classmethod
def ignore_url_parent(cls, parent: Any):
    if isinstance(parent, dict):
        return None

    else:
        return parent

load classmethod ¤

load(
    data: BioimageioYamlContentView,
    context: Optional[ValidationContext] = None,
) -> Union[Self, InvalidDescr]

factory method to create a resource description object

Source code in src/bioimageio/spec/_internal/common_nodes.py
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@classmethod
def load(
    cls,
    data: BioimageioYamlContentView,
    context: Optional[ValidationContext] = None,
) -> Union[Self, InvalidDescr]:
    """factory method to create a resource description object"""
    context = context or get_validation_context()
    if context.perform_io_checks:
        file_descrs = extract_file_descrs({k: v for k, v in data.items()})
        populate_cache(file_descrs)  # TODO: add progress bar

    with context.replace(log_warnings=context.warning_level <= INFO):
        rd, errors, val_warnings = cls._load_impl(deepcopy_yaml_value(data))

    if context.warning_level > INFO:
        all_warnings_context = context.replace(
            warning_level=INFO, log_warnings=False, raise_errors=False
        )
        # raise all validation warnings by reloading
        with all_warnings_context:
            _, _, val_warnings = cls._load_impl(deepcopy_yaml_value(data))

    format_status = "failed" if errors else "passed"
    rd.validation_summary.add_detail(
        ValidationDetail(
            errors=errors,
            name=(
                "bioimageio.spec format validation"
                f" {rd.type} {cls.implemented_format_version}"
            ),
            status=format_status,
            warnings=val_warnings,
        ),
        update_status=False,  # avoid updating status from 'valid-format' to 'passed', but ...
    )
    if format_status == "failed":
        # ... update status in case of failure
        rd.validation_summary.status = "failed"

    return rd

minimum_shape2valid_output pydantic-validator ¤

minimum_shape2valid_output() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
@model_validator(mode="after")
def minimum_shape2valid_output(self) -> Self:
    tensors_by_name: Dict[
        TensorName, Union[InputTensorDescr, OutputTensorDescr]
    ] = {t.name: t for t in self.inputs + self.outputs}

    for out in self.outputs:
        if isinstance(out.shape, ImplicitOutputShape):
            ndim_ref = len(tensors_by_name[out.shape.reference_tensor].shape)
            ndim_out_ref = len(
                [scale for scale in out.shape.scale if scale is not None]
            )
            if ndim_ref != ndim_out_ref:
                expanded_dim_note = (
                    " Note that expanded dimensions (`scale`: null) are not"
                    + f" counted for {out.name}'s dimensionality here."
                    if None in out.shape.scale
                    else ""
                )
                raise ValueError(
                    f"Referenced tensor '{out.shape.reference_tensor}' with"
                    + f" {ndim_ref} dimensions does not match output tensor"
                    + f" '{out.name}' with"
                    + f" {ndim_out_ref} dimensions.{expanded_dim_note}"
                )

        min_out_shape = self._get_min_shape(out, tensors_by_name)
        if out.halo:
            halo = out.halo
            halo_msg = f" for halo {out.halo}"
        else:
            halo = [0] * len(min_out_shape)
            halo_msg = ""

        if any([s - 2 * h < 1 for s, h in zip(min_out_shape, halo)]):
            raise ValueError(
                f"Minimal shape {min_out_shape} of output {out.name} is too"
                + f" small{halo_msg}."
            )

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

package ¤

package(
    dest: Optional[
        Union[ZipFile, IO[bytes], Path, str]
    ] = None,
) -> ZipFile

package the described resource as a zip archive

Parameters:

  • dest ¤
    (Optional[Union[ZipFile, IO[bytes], Path, str]], default: None ) –

    (path/bytes stream of) destination zipfile

Source code in src/bioimageio/spec/_internal/common_nodes.py
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
def package(
    self, dest: Optional[Union[ZipFile, IO[bytes], Path, str]] = None, /
) -> ZipFile:
    """package the described resource as a zip archive

    Args:
        dest: (path/bytes stream of) destination zipfile
    """
    if dest is None:
        dest = BytesIO()

    if isinstance(dest, ZipFile):
        zip = dest
        if "r" in zip.mode:
            raise ValueError(
                f"zip file {dest} opened in '{zip.mode}' mode,"
                + " but write access is needed for packaging."
            )
    else:
        zip = ZipFile(dest, mode="w")

    if zip.filename is None:
        zip.filename = (
            str(getattr(self, "id", getattr(self, "name", "bioimageio"))) + ".zip"
        )

    content = self.get_package_content()
    write_content_to_zip(content, zip)
    return zip

unique_io_names pydantic-validator ¤

unique_io_names() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
1160
1161
1162
1163
1164
1165
1166
@model_validator(mode="after")
def unique_io_names(self) -> Self:
    unique_names = {str(ss.name) for s in (self.inputs, self.outputs) for ss in s}
    if len(unique_names) != (len(self.inputs) + len(self.outputs)):
        raise ValueError("Duplicate tensor descriptor names across inputs/outputs")

    return self

unique_tensor_descr_names pydantic-validator ¤

unique_tensor_descr_names(
    value: Sequence[
        Union[InputTensorDescr, OutputTensorDescr]
    ],
) -> Sequence[Union[InputTensorDescr, OutputTensorDescr]]
Source code in src/bioimageio/spec/model/v0_4.py
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
@field_validator("inputs", "outputs")
@classmethod
def unique_tensor_descr_names(
    cls, value: Sequence[Union[InputTensorDescr, OutputTensorDescr]]
) -> Sequence[Union[InputTensorDescr, OutputTensorDescr]]:
    unique_names = {str(v.name) for v in value}
    if len(unique_names) != len(value):
        raise ValueError("Duplicate tensor descriptor names")

    return value

validate_tensor_references_in_inputs pydantic-validator ¤

validate_tensor_references_in_inputs() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
@model_validator(mode="after")
def validate_tensor_references_in_inputs(self) -> Self:
    for t in self.inputs:
        for proc in t.preprocessing:
            if "reference_tensor" not in proc.kwargs:
                continue

            ref_tensor = proc.kwargs["reference_tensor"]
            if ref_tensor is not None and str(ref_tensor) not in {
                str(t.name) for t in self.inputs
            }:
                raise ValueError(f"'{ref_tensor}' not found in inputs")

            if ref_tensor == t.name:
                raise ValueError(
                    f"invalid self reference for preprocessing of tensor {t.name}"
                )

    return self

validate_tensor_references_in_outputs pydantic-validator ¤

validate_tensor_references_in_outputs() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
@model_validator(mode="after")
def validate_tensor_references_in_outputs(self) -> Self:
    for t in self.outputs:
        for proc in t.postprocessing:
            if "reference_tensor" not in proc.kwargs:
                continue
            ref_tensor = proc.kwargs["reference_tensor"]
            if ref_tensor is not None and str(ref_tensor) not in {
                str(t.name) for t in self.inputs
            }:
                raise ValueError(f"{ref_tensor} not found in inputs")

    return self

warn_about_tag_categories classmethod ¤

warn_about_tag_categories(
    value: List[str], info: ValidationInfo
) -> List[str]
Source code in src/bioimageio/spec/generic/v0_2.py
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
@as_warning
@field_validator("tags")
@classmethod
def warn_about_tag_categories(
    cls, value: List[str], info: ValidationInfo
) -> List[str]:
    categories = TAG_CATEGORIES.get(info.data["type"], {})
    missing_categories: List[Mapping[str, Sequence[str]]] = []
    for cat, entries in categories.items():
        if not any(e in value for e in entries):
            missing_categories.append({cat: entries})

    if missing_categories:
        raise ValueError(
            f"Missing tags from bioimage.io categories: {missing_categories}"
        )

    return value

ModelId ¤

Bases: ResourceId


              flowchart TD
              bioimageio.spec.model.v0_4.ModelId[ModelId]
              bioimageio.spec.generic.v0_2.ResourceId[ResourceId]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec.generic.v0_2.ResourceId --> bioimageio.spec.model.v0_4.ModelId
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec.generic.v0_2.ResourceId
                



              click bioimageio.spec.model.v0_4.ModelId href "" "bioimageio.spec.model.v0_4.ModelId"
              click bioimageio.spec.generic.v0_2.ResourceId href "" "bioimageio.spec.generic.v0_2.ResourceId"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[
        NotEmpty[str],
        AfterValidator(str.lower),
        RestrictCharacters(
            string.ascii_lowercase + string.digits + "_-/."
        ),
        annotated_types.Predicate(
            lambda s: not (
                s.startswith("/") or s.endswith("/")
            )
        ),
    ]
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated
    return self._after_validator()

OnnxWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "opset_version": {
      "anyOf": [
        {
          "minimum": 7,
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "ONNX opset version",
      "title": "Opset Version"
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_4.OnnxWeightsDescr",
  "type": "object"
}

Fields:

  • source (FileSource_)
  • sha256 (Optional[Sha256])
  • attachments (Annotated[Union[AttachmentsDescr, None], warn(None, 'Weights entry depends on additional attachments.', ALERT)])
  • authors (Union[List[Author], None])
  • dependencies (Annotated[Optional[Dependencies], warn(None, 'Custom dependencies ({value}) specified. Avoid this whenever possible ' + 'to allow execution in a wider range of software environments.'), Field(examples=['conda:environment.yaml', 'maven:./pom.xml', 'pip:./requirements.txt'])])
  • parent (Annotated[Optional[WeightsFormat], Field(examples=['pytorch_state_dict'])])
  • opset_version (Optional[Annotated[int, Ge(7)]])

Validators:

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

opset_version pydantic-field ¤

opset_version: Optional[Annotated[int, Ge(7)]] = None

ONNX opset version

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The weights file.

type class-attribute ¤

type: WeightsFormat = 'onnx'

weights_format_name class-attribute ¤

weights_format_name: str = 'ONNX'

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be its own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

OutputTensorDescr pydantic-model ¤

Bases: TensorDescrBase

Show JSON schema:
{
  "$defs": {
    "BinarizeDescr": {
      "additionalProperties": false,
      "description": "BinarizeDescr the tensor with a fixed `BinarizeKwargs.threshold`.\nValues above the threshold will be set to one, values below the threshold to zero.",
      "properties": {
        "name": {
          "const": "binarize",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/BinarizeKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.BinarizeDescr",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_4.BinarizeKwargs",
      "type": "object"
    },
    "ClipDescr": {
      "additionalProperties": false,
      "description": "Clip tensor values to a range.\n\nSet tensor values below `ClipKwargs.min` to `ClipKwargs.min`\nand above `ClipKwargs.max` to `ClipKwargs.max`.",
      "properties": {
        "name": {
          "const": "clip",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ClipKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ClipDescr",
      "type": "object"
    },
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    },
    "ImplicitOutputShape": {
      "additionalProperties": false,
      "description": "Output tensor shape depending on an input tensor shape.\n`shape(output_tensor) = shape(input_tensor) * scale + 2 * offset`",
      "properties": {
        "reference_tensor": {
          "description": "Name of the reference tensor.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "scale": {
          "description": "output_pix/input_pix for each dimension.\n'null' values indicate new dimensions, whose length is defined by 2*`offset`",
          "items": {
            "anyOf": [
              {
                "type": "number"
              },
              {
                "type": "null"
              }
            ]
          },
          "minItems": 1,
          "title": "Scale",
          "type": "array"
        },
        "offset": {
          "description": "Position of origin wrt to input.",
          "items": {
            "anyOf": [
              {
                "type": "integer"
              },
              {
                "multipleOf": 0.5,
                "type": "number"
              }
            ]
          },
          "minItems": 1,
          "title": "Offset",
          "type": "array"
        }
      },
      "required": [
        "reference_tensor",
        "scale",
        "offset"
      ],
      "title": "model.v0_4.ImplicitOutputShape",
      "type": "object"
    },
    "ScaleLinearDescr": {
      "additionalProperties": false,
      "description": "Fixed linear scaling.",
      "properties": {
        "name": {
          "const": "scale_linear",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleLinearKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleLinearDescr",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to scale the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "title": "model.v0_4.ScaleLinearKwargs",
      "type": "object"
    },
    "ScaleMeanVarianceDescr": {
      "additionalProperties": false,
      "description": "Scale the tensor s.t. its mean and variance match a reference tensor.",
      "properties": {
        "name": {
          "const": "scale_mean_variance",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleMeanVarianceKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleMeanVarianceDescr",
      "type": "object"
    },
    "ScaleMeanVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleMeanVarianceDescr`",
      "properties": {
        "mode": {
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "reference_tensor": {
          "description": "Name of tensor to match.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to normalize the two image axes for 2d data jointly.\nDefault: scale all non-batch axes jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability:\n\"`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "mode",
        "reference_tensor"
      ],
      "title": "model.v0_4.ScaleMeanVarianceKwargs",
      "type": "object"
    },
    "ScaleRangeDescr": {
      "additionalProperties": false,
      "description": "Scale with percentiles.",
      "properties": {
        "name": {
          "const": "scale_range",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleRangeKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ScaleRangeDescr",
      "type": "object"
    },
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "mode": {
          "description": "Mode for computing percentiles.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | compute for the entire dataset       |\n| per_sample  | compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example xy to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "min_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "ge": 0,
          "lt": 100,
          "title": "Min Percentile"
        },
        "max_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "gt": 1,
          "le": 100,
          "title": "Max Percentile"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "TensorName",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor name to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.\nFor a tensor in `outputs` only input tensor refereences are allowed if `mode: per_dataset`",
          "title": "Reference Tensor"
        }
      },
      "required": [
        "mode",
        "axes"
      ],
      "title": "model.v0_4.ScaleRangeKwargs",
      "type": "object"
    },
    "SigmoidDescr": {
      "additionalProperties": false,
      "description": "The logistic sigmoid funciton, a.k.a. expit function.",
      "properties": {
        "name": {
          "const": "sigmoid",
          "title": "Name",
          "type": "string"
        }
      },
      "required": [
        "name"
      ],
      "title": "model.v0_4.SigmoidDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract mean and divide by variance.",
      "properties": {
        "name": {
          "const": "zero_mean_unit_variance",
          "title": "Name",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
        }
      },
      "required": [
        "name",
        "kwargs"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "mode": {
          "default": "fixed",
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n|   fixed     | Fixed values for mean and variance   |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "fixed",
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example `xy` to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "mean": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The mean value(s) to use for `mode: fixed`.\nFor example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`.",
          "examples": [
            [
              1.1,
              2.2,
              3.3
            ]
          ],
          "title": "Mean"
        },
        "std": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The standard deviation values to use for `mode: fixed`. Analogous to mean.",
          "examples": [
            [
              0.1,
              0.2,
              0.3
            ]
          ],
          "title": "Std"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "axes"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "name": {
      "description": "Tensor name. No duplicates are allowed.",
      "minLength": 1,
      "title": "TensorName",
      "type": "string"
    },
    "description": {
      "default": "",
      "title": "Description",
      "type": "string"
    },
    "axes": {
      "description": "Axes identifying characters. Same length and order as the axes in `shape`.\n| axis | description |\n| --- | --- |\n|  b  |  batch (groups multiple samples) |\n|  i  |  instance/index/element |\n|  t  |  time |\n|  c  |  channel |\n|  z  |  spatial dimension z |\n|  y  |  spatial dimension y |\n|  x  |  spatial dimension x |",
      "title": "Axes",
      "type": "string"
    },
    "data_range": {
      "anyOf": [
        {
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "type": "number"
            },
            {
              "type": "number"
            }
          ],
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\nIf not specified, the full data range that can be expressed in `data_type` is allowed.",
      "title": "Data Range"
    },
    "data_type": {
      "description": "Data type.\nThe data flow in bioimage.io models is explained\n[in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit).",
      "enum": [
        "float32",
        "float64",
        "uint8",
        "int8",
        "uint16",
        "int16",
        "uint32",
        "int32",
        "uint64",
        "int64",
        "bool"
      ],
      "title": "Data Type",
      "type": "string"
    },
    "shape": {
      "anyOf": [
        {
          "items": {
            "type": "integer"
          },
          "type": "array"
        },
        {
          "$ref": "#/$defs/ImplicitOutputShape"
        }
      ],
      "description": "Output tensor shape.",
      "title": "Shape"
    },
    "halo": {
      "anyOf": [
        {
          "items": {
            "type": "integer"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The `halo` that should be cropped from the output tensor to avoid boundary effects.\nThe `halo` is to be cropped from both sides, i.e. `shape_after_crop = shape - 2 * halo`.\nTo document a `halo` that is already cropped by the model `shape.offset` has to be used instead.",
      "title": "Halo"
    },
    "postprocessing": {
      "description": "Description of how this output should be postprocessed.",
      "items": {
        "discriminator": {
          "mapping": {
            "binarize": "#/$defs/BinarizeDescr",
            "clip": "#/$defs/ClipDescr",
            "scale_linear": "#/$defs/ScaleLinearDescr",
            "scale_mean_variance": "#/$defs/ScaleMeanVarianceDescr",
            "scale_range": "#/$defs/ScaleRangeDescr",
            "sigmoid": "#/$defs/SigmoidDescr",
            "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
          },
          "propertyName": "name"
        },
        "oneOf": [
          {
            "$ref": "#/$defs/BinarizeDescr"
          },
          {
            "$ref": "#/$defs/ClipDescr"
          },
          {
            "$ref": "#/$defs/ScaleLinearDescr"
          },
          {
            "$ref": "#/$defs/SigmoidDescr"
          },
          {
            "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
          },
          {
            "$ref": "#/$defs/ScaleRangeDescr"
          },
          {
            "$ref": "#/$defs/ScaleMeanVarianceDescr"
          }
        ]
      },
      "title": "Postprocessing",
      "type": "array"
    }
  },
  "required": [
    "name",
    "axes",
    "data_type",
    "shape"
  ],
  "title": "model.v0_4.OutputTensorDescr",
  "type": "object"
}

Fields:

Validators:

axes pydantic-field ¤

axes: AxesStr

Axes identifying characters. Same length and order as the axes in shape. | axis | description | | --- | --- | | b | batch (groups multiple samples) | | i | instance/index/element | | t | time | | c | channel | | z | spatial dimension z | | y | spatial dimension y | | x | spatial dimension x |

data_range pydantic-field ¤

data_range: Optional[
    Tuple[
        Annotated[float, AllowInfNan(True)],
        Annotated[float, AllowInfNan(True)],
    ]
] = None

Tuple (minimum, maximum) specifying the allowed range of the data in this tensor. If not specified, the full data range that can be expressed in data_type is allowed.

data_type pydantic-field ¤

data_type: Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
    "bool",
]

Data type. The data flow in bioimage.io models is explained in this diagram.

description pydantic-field ¤

description: str = ''

halo pydantic-field ¤

halo: Optional[Sequence[int]] = None

The halo that should be cropped from the output tensor to avoid boundary effects. The halo is to be cropped from both sides, i.e. shape_after_crop = shape - 2 * halo. To document a halo that is already cropped by the model shape.offset has to be used instead.

name pydantic-field ¤

name: TensorName

Tensor name. No duplicates are allowed.

postprocessing pydantic-field ¤

postprocessing: List[PostprocessingDescr]

Description of how this output should be postprocessed.

shape pydantic-field ¤

shape: Union[Sequence[int], ImplicitOutputShape]

Output tensor shape.

matching_halo_length pydantic-validator ¤

matching_halo_length() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
1003
1004
1005
1006
1007
1008
1009
1010
@model_validator(mode="after")
def matching_halo_length(self) -> Self:
    if self.halo and len(self.halo) != len(self.shape):
        raise ValueError(
            f"halo {self.halo} has to have same length as shape {self.shape}!"
        )

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_postprocessing_kwargs pydantic-validator ¤

validate_postprocessing_kwargs() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
@model_validator(mode="after")
def validate_postprocessing_kwargs(self) -> Self:
    for p in self.postprocessing:
        kwargs_axes = p.kwargs.get("axes", "")
        if not isinstance(kwargs_axes, str):
            raise ValueError(f"Expected {kwargs_axes} to be a string")

        if any(a not in self.axes for a in kwargs_axes):
            raise ValueError("`kwargs.axes` needs to be subset of axes")

    return self

ParameterizedInputShape pydantic-model ¤

Bases: Node

A sequence of valid shapes given by shape_k = min + k * step for k in {0, 1, ...}.

Show JSON schema:
{
  "additionalProperties": false,
  "description": "A sequence of valid shapes given by `shape_k = min + k * step for k in {0, 1, ...}`.",
  "properties": {
    "min": {
      "description": "The minimum input shape",
      "items": {
        "type": "integer"
      },
      "minItems": 1,
      "title": "Min",
      "type": "array"
    },
    "step": {
      "description": "The minimum shape change",
      "items": {
        "type": "integer"
      },
      "minItems": 1,
      "title": "Step",
      "type": "array"
    }
  },
  "required": [
    "min",
    "step"
  ],
  "title": "model.v0_4.ParameterizedInputShape",
  "type": "object"
}

Fields:

  • min (NotEmpty[List[int]])
  • step (NotEmpty[List[int]])

Validators:

min pydantic-field ¤

min: NotEmpty[List[int]]

The minimum input shape

step pydantic-field ¤

step: NotEmpty[List[int]]

The minimum shape change

__len__ ¤

__len__() -> int
Source code in src/bioimageio/spec/model/v0_4.py
556
557
def __len__(self) -> int:
    return len(self.min)

matching_lengths pydantic-validator ¤

matching_lengths() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
559
560
561
562
563
564
@model_validator(mode="after")
def matching_lengths(self) -> Self:
    if len(self.min) != len(self.step):
        raise ValueError("`min` and `step` required to have the same length")

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ProcessingDescrBase pydantic-model ¤

Bases: NodeWithExplicitlySetFields

processing base class

Show JSON schema:
{
  "additionalProperties": false,
  "description": "processing base class",
  "properties": {},
  "title": "model.v0_4.ProcessingDescrBase",
  "type": "object"
}

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ProcessingKwargs pydantic-model ¤

Bases: KwargsNode

base class for pre-/postprocessing key word arguments

Show JSON schema:
{
  "additionalProperties": false,
  "description": "base class for pre-/postprocessing key word arguments",
  "properties": {},
  "title": "model.v0_4.ProcessingKwargs",
  "type": "object"
}

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

PytorchStateDictWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "architecture": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "CallableFromFile",
          "type": "string"
        },
        {
          "pattern": "^.+\\..+$",
          "title": "CallableFromDepencency",
          "type": "string"
        }
      ],
      "description": "callable returning a torch.nn.Module instance.\nLocal implementation: `<relative path to file>:<identifier of implementation within the file>`.\nImplementation in a dependency: `<dependency-package>.<[dependency-module]>.<identifier>`.",
      "examples": [
        "my_function.py:MyNetworkClass",
        "my_module.submodule.get_my_model"
      ],
      "title": "Architecture"
    },
    "architecture_sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The SHA256 of the architecture source file, if the architecture is not defined in a module listed in `dependencies`\nYou can drag and drop your file to this\n[online tool](http://emn178.github.io/online-tools/sha256_checksum.html) to generate a SHA256 in your browser.\nOr you can generate a SHA256 checksum with Python's `hashlib`,\n[here is a codesnippet](https://gist.github.com/FynnBe/e64460463df89439cff218bbf59c1100).",
      "title": "Architecture Sha256"
    },
    "kwargs": {
      "additionalProperties": true,
      "description": "key word arguments for the `architecture` callable",
      "title": "Kwargs",
      "type": "object"
    },
    "pytorch_version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Version of the PyTorch library used.\nIf `depencencies` is specified it should include pytorch and the verison has to match.\n(`dependencies` overrules `pytorch_version`)"
    }
  },
  "required": [
    "source",
    "architecture"
  ],
  "title": "model.v0_4.PytorchStateDictWeightsDescr",
  "type": "object"
}

Fields:

Validators:

architecture pydantic-field ¤

architecture: CustomCallable

callable returning a torch.nn.Module instance. Local implementation: <relative path to file>:<identifier of implementation within the file>. Implementation in a dependency: <dependency-package>.<[dependency-module]>.<identifier>.

architecture_sha256 pydantic-field ¤

architecture_sha256: Annotated[
    Optional[Sha256],
    Field(
        description="The SHA256 of the architecture source file, if the architecture is not defined in a module listed in `dependencies`\n"
        + SHA256_HINT
    ),
] = None

The SHA256 of the architecture source file, if the architecture is not defined in a module listed in dependencies.

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

kwargs pydantic-field ¤

kwargs: Dict[str, Any]

key word arguments for the architecture callable

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

pytorch_version pydantic-field ¤

pytorch_version: Optional[Version] = None

Version of the PyTorch library used. If dependencies is specified it should include pytorch and the version has to match. (dependencies overrules pytorch_version)

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The weights file.

type class-attribute ¤

type: WeightsFormat = 'pytorch_state_dict'

weights_format_name class-attribute ¤

weights_format_name: str = 'Pytorch State Dict'

check_architecture_sha256 pydantic-validator ¤

check_architecture_sha256() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
@model_validator(mode="after")
def check_architecture_sha256(self) -> Self:
    if isinstance(self.architecture, CallableFromFile):
        if self.architecture_sha256 is None:
            raise ValueError(
                "Missing required `architecture_sha256` for `architecture` with"
                + " source file."
            )
    elif self.architecture_sha256 is not None:
        raise ValueError(
            "Got `architecture_sha256` for architecture that does not have a source"
            + " file."
        )

    return self

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be it's own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

RunMode pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "name": {
      "anyOf": [
        {
          "const": "deepimagej",
          "type": "string"
        },
        {
          "type": "string"
        }
      ],
      "description": "Run mode name",
      "title": "Name"
    },
    "kwargs": {
      "additionalProperties": true,
      "description": "Run mode specific key word arguments",
      "title": "Kwargs",
      "type": "object"
    }
  },
  "required": [
    "name"
  ],
  "title": "model.v0_4.RunMode",
  "type": "object"
}

Fields:

kwargs pydantic-field ¤

kwargs: Dict[str, Any]

Run mode specific key word arguments

name pydantic-field ¤

name: Annotated[
    Union[KnownRunMode, str],
    warn(KnownRunMode, "Unknown run mode '{value}'."),
]

Run mode name

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleLinearDescr pydantic-model ¤

Bases: ProcessingDescrBase

Fixed linear scaling.

Show JSON schema:
{
  "$defs": {
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to scale the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "title": "model.v0_4.ScaleLinearKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Fixed linear scaling.",
  "properties": {
    "name": {
      "const": "scale_linear",
      "title": "Name",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ScaleLinearKwargs"
    }
  },
  "required": [
    "name",
    "kwargs"
  ],
  "title": "model.v0_4.ScaleLinearDescr",
  "type": "object"
}

Fields:

implemented_name class-attribute ¤

implemented_name: Literal['scale_linear'] = 'scale_linear'

kwargs pydantic-field ¤

name pydantic-field ¤

name: Literal['scale_linear'] = 'scale_linear'

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleLinearKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ScaleLinearDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ScaleLinearDescr`",
  "properties": {
    "axes": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The subset of axes to scale jointly.\nFor example xy to scale the two image axes for 2d data jointly.",
      "examples": [
        "xy"
      ],
      "title": "Axes"
    },
    "gain": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "items": {
            "type": "number"
          },
          "type": "array"
        }
      ],
      "default": 1.0,
      "description": "multiplicative factor",
      "title": "Gain"
    },
    "offset": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "items": {
            "type": "number"
          },
          "type": "array"
        }
      ],
      "default": 0.0,
      "description": "additive term",
      "title": "Offset"
    }
  },
  "title": "model.v0_4.ScaleLinearKwargs",
  "type": "object"
}

Fields:

Validators:

axes pydantic-field ¤

axes: Annotated[
    Optional[AxesInCZYX], Field(examples=["xy"])
] = None

The subset of axes to scale jointly. For example xy to scale the two image axes for 2d data jointly.

gain pydantic-field ¤

gain: Union[float, List[float]] = 1.0

multiplicative factor

offset pydantic-field ¤

offset: Union[float, List[float]] = 0.0

additive term

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

either_gain_or_offset pydantic-validator ¤

either_gain_or_offset() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
@model_validator(mode="after")
def either_gain_or_offset(self) -> Self:
    if (
        self.gain == 1.0
        or isinstance(self.gain, list)
        and all(g == 1.0 for g in self.gain)
    ) and (
        self.offset == 0.0
        or isinstance(self.offset, list)
        and all(off == 0.0 for off in self.offset)
    ):
        raise ValueError(
            "Redunt linear scaling not allowd. Set `gain` != 1.0 and/or `offset` !="
            + " 0.0."
        )

    return self

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleMeanVarianceDescr pydantic-model ¤

Bases: ProcessingDescrBase

Scale the tensor s.t. its mean and variance match a reference tensor.

Show JSON schema:
{
  "$defs": {
    "ScaleMeanVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleMeanVarianceDescr`",
      "properties": {
        "mode": {
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "reference_tensor": {
          "description": "Name of tensor to match.",
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        "axes": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to scale jointly.\nFor example xy to normalize the two image axes for 2d data jointly.\nDefault: scale all non-batch axes jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability:\n\"`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "mode",
        "reference_tensor"
      ],
      "title": "model.v0_4.ScaleMeanVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Scale the tensor s.t. its mean and variance match a reference tensor.",
  "properties": {
    "name": {
      "const": "scale_mean_variance",
      "title": "Name",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ScaleMeanVarianceKwargs"
    }
  },
  "required": [
    "name",
    "kwargs"
  ],
  "title": "model.v0_4.ScaleMeanVarianceDescr",
  "type": "object"
}

Fields:

implemented_name class-attribute ¤

implemented_name: Literal["scale_mean_variance"] = (
    "scale_mean_variance"
)

kwargs pydantic-field ¤

name pydantic-field ¤

name: Literal["scale_mean_variance"] = "scale_mean_variance"

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleMeanVarianceKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ScaleMeanVarianceDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ScaleMeanVarianceDescr`",
  "properties": {
    "mode": {
      "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
      "enum": [
        "per_dataset",
        "per_sample"
      ],
      "title": "Mode",
      "type": "string"
    },
    "reference_tensor": {
      "description": "Name of tensor to match.",
      "minLength": 1,
      "title": "TensorName",
      "type": "string"
    },
    "axes": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The subset of axes to scale jointly.\nFor example xy to normalize the two image axes for 2d data jointly.\nDefault: scale all non-batch axes jointly.",
      "examples": [
        "xy"
      ],
      "title": "Axes"
    },
    "eps": {
      "default": 1e-06,
      "description": "Epsilon for numeric stability:\n\"`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.",
      "exclusiveMinimum": 0,
      "maximum": 0.1,
      "title": "Eps",
      "type": "number"
    }
  },
  "required": [
    "mode",
    "reference_tensor"
  ],
  "title": "model.v0_4.ScaleMeanVarianceKwargs",
  "type": "object"
}

Fields:

axes pydantic-field ¤

axes: Annotated[
    Optional[AxesInCZYX], Field(examples=["xy"])
] = None

The subset of axes to scale jointly. For example xy to normalize the two image axes for 2d data jointly. Default: scale all non-batch axes jointly.

eps pydantic-field ¤

eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-06

Epsilon for numeric stability: `out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean`.

mode pydantic-field ¤

mode: Literal['per_dataset', 'per_sample']

Mode for computing mean and variance. | mode | description | | ----------- | ------------------------------------ | | per_dataset | Compute for the entire dataset | | per_sample | Compute for each sample individually |

reference_tensor pydantic-field ¤

reference_tensor: TensorName

Name of tensor to match.

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleRangeDescr pydantic-model ¤

Bases: ProcessingDescrBase

Scale with percentiles.

Show JSON schema:
{
  "$defs": {
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "mode": {
          "description": "Mode for computing percentiles.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | compute for the entire dataset       |\n| per_sample  | compute for each sample individually |",
          "enum": [
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example xy to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "min_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "ge": 0,
          "lt": 100,
          "title": "Min Percentile"
        },
        "max_percentile": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "number"
            }
          ],
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "gt": 1,
          "le": 100,
          "title": "Max Percentile"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "TensorName",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor name to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.\nFor a tensor in `outputs` only input tensor refereences are allowed if `mode: per_dataset`",
          "title": "Reference Tensor"
        }
      },
      "required": [
        "mode",
        "axes"
      ],
      "title": "model.v0_4.ScaleRangeKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Scale with percentiles.",
  "properties": {
    "name": {
      "const": "scale_range",
      "title": "Name",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ScaleRangeKwargs"
    }
  },
  "required": [
    "name",
    "kwargs"
  ],
  "title": "model.v0_4.ScaleRangeDescr",
  "type": "object"
}

Fields:

implemented_name class-attribute ¤

implemented_name: Literal['scale_range'] = 'scale_range'

kwargs pydantic-field ¤

name pydantic-field ¤

name: Literal['scale_range'] = 'scale_range'

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleRangeKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ScaleRangeDescr

For min_percentile=0.0 (the default) and max_percentile=100 (the default) this processing step normalizes data to the [0, 1] interval. For other percentiles the normalized values will partially be outside the [0, 1] interval. Use ScaleRange followed by ClipDescr if you want to limit the normalized values to a range.

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
  "properties": {
    "mode": {
      "description": "Mode for computing percentiles.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n| per_dataset | compute for the entire dataset       |\n| per_sample  | compute for each sample individually |",
      "enum": [
        "per_dataset",
        "per_sample"
      ],
      "title": "Mode",
      "type": "string"
    },
    "axes": {
      "description": "The subset of axes to normalize jointly.\nFor example xy to normalize the two image axes for 2d data jointly.",
      "examples": [
        "xy"
      ],
      "title": "Axes",
      "type": "string"
    },
    "min_percentile": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "default": 0.0,
      "description": "The lower percentile used to determine the value to align with zero.",
      "ge": 0,
      "lt": 100,
      "title": "Min Percentile"
    },
    "max_percentile": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "default": 100.0,
      "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
      "gt": 1,
      "le": 100,
      "title": "Max Percentile"
    },
    "eps": {
      "default": 1e-06,
      "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
      "exclusiveMinimum": 0,
      "maximum": 0.1,
      "title": "Eps",
      "type": "number"
    },
    "reference_tensor": {
      "anyOf": [
        {
          "minLength": 1,
          "title": "TensorName",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Tensor name to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.\nFor a tensor in `outputs` only input tensor refereences are allowed if `mode: per_dataset`",
      "title": "Reference Tensor"
    }
  },
  "required": [
    "mode",
    "axes"
  ],
  "title": "model.v0_4.ScaleRangeKwargs",
  "type": "object"
}

Fields:

Validators:

axes pydantic-field ¤

axes: Annotated[AxesInCZYX, Field(examples=['xy'])]

The subset of axes to normalize jointly. For example xy to normalize the two image axes for 2d data jointly.

eps pydantic-field ¤

eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-06

Epsilon for numeric stability. out = (tensor - v_lower) / (v_upper - v_lower + eps); with v_lower,v_upper values at the respective percentiles.

max_percentile pydantic-field ¤

max_percentile: Annotated[
    Union[int, float], Interval(gt=1, le=100)
] = 100.0

The upper percentile used to determine the value to align with one. Has to be bigger than min_percentile. The range is 1 to 100 instead of 0 to 100 to avoid mistakenly accepting percentiles specified in the range 0.0 to 1.0.

min_percentile pydantic-field ¤

min_percentile: Annotated[
    Union[int, float], Interval(ge=0, lt=100)
] = 0.0

The lower percentile used to determine the value to align with zero.

mode pydantic-field ¤

mode: Literal['per_dataset', 'per_sample']

Mode for computing percentiles. | mode | description | | ----------- | ------------------------------------ | | per_dataset | compute for the entire dataset | | per_sample | compute for each sample individually |

reference_tensor pydantic-field ¤

reference_tensor: Optional[TensorName] = None

Tensor name to compute the percentiles from. Default: The tensor itself. For any tensor in inputs only input tensor references are allowed. For a tensor in outputs only input tensor references are allowed if mode: per_dataset

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

min_smaller_max pydantic-validator ¤

min_smaller_max(info: ValidationInfo) -> Self
Source code in src/bioimageio/spec/model/v0_4.py
821
822
823
824
825
826
827
828
829
@model_validator(mode="after")
def min_smaller_max(self, info: ValidationInfo) -> Self:
    if self.min_percentile >= self.max_percentile:
        raise ValueError(
            f"min_percentile {self.min_percentile} >= max_percentile"
            + f" {self.max_percentile}"
        )

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SigmoidDescr pydantic-model ¤

Bases: ProcessingDescrBase

The logistic sigmoid function, a.k.a. expit function.

Show JSON schema:
{
  "additionalProperties": false,
  "description": "The logistic sigmoid funciton, a.k.a. expit function.",
  "properties": {
    "name": {
      "const": "sigmoid",
      "title": "Name",
      "type": "string"
    }
  },
  "required": [
    "name"
  ],
  "title": "model.v0_4.SigmoidDescr",
  "type": "object"
}

Fields:

  • name (Literal['sigmoid'])

implemented_name class-attribute ¤

implemented_name: Literal['sigmoid'] = 'sigmoid'

kwargs property ¤

empty kwargs

name pydantic-field ¤

name: Literal['sigmoid'] = 'sigmoid'

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TensorDescrBase pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "name": {
      "description": "Tensor name. No duplicates are allowed.",
      "minLength": 1,
      "title": "TensorName",
      "type": "string"
    },
    "description": {
      "default": "",
      "title": "Description",
      "type": "string"
    },
    "axes": {
      "description": "Axes identifying characters. Same length and order as the axes in `shape`.\n| axis | description |\n| --- | --- |\n|  b  |  batch (groups multiple samples) |\n|  i  |  instance/index/element |\n|  t  |  time |\n|  c  |  channel |\n|  z  |  spatial dimension z |\n|  y  |  spatial dimension y |\n|  x  |  spatial dimension x |",
      "title": "Axes",
      "type": "string"
    },
    "data_range": {
      "anyOf": [
        {
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "type": "number"
            },
            {
              "type": "number"
            }
          ],
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\nIf not specified, the full data range that can be expressed in `data_type` is allowed.",
      "title": "Data Range"
    }
  },
  "required": [
    "name",
    "axes"
  ],
  "title": "model.v0_4.TensorDescrBase",
  "type": "object"
}

Fields:

axes pydantic-field ¤

axes: AxesStr

Axes identifying characters. Same length and order as the axes in `shape`.

| axis | description |
| --- | --- |
| b | batch (groups multiple samples) |
| i | instance/index/element |
| t | time |
| c | channel |
| z | spatial dimension z |
| y | spatial dimension y |
| x | spatial dimension x |

data_range pydantic-field ¤

data_range: Optional[
    Tuple[
        Annotated[float, AllowInfNan(True)],
        Annotated[float, AllowInfNan(True)],
    ]
] = None

Tuple (minimum, maximum) specifying the allowed range of the data in this tensor. If not specified, the full data range that can be expressed in data_type is allowed.

description pydantic-field ¤

description: str = ''

name pydantic-field ¤

name: TensorName

Tensor name. No duplicates are allowed.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TensorName ¤

Bases: LowerCaseIdentifier


              flowchart TD
              bioimageio.spec.model.v0_4.TensorName[TensorName]
              bioimageio.spec._internal.types.LowerCaseIdentifier[LowerCaseIdentifier]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec._internal.types.LowerCaseIdentifier --> bioimageio.spec.model.v0_4.TensorName
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec._internal.types.LowerCaseIdentifier
                



              click bioimageio.spec.model.v0_4.TensorName href "" "bioimageio.spec.model.v0_4.TensorName"
              click bioimageio.spec._internal.types.LowerCaseIdentifier href "" "bioimageio.spec._internal.types.LowerCaseIdentifier"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    LowerCaseIdentifierAnno
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated
    return self._after_validator()

TensorflowJsWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "tensorflow_version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Version of the TensorFlow library used."
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_4.TensorflowJsWeightsDescr",
  "type": "object"
}

Fields:

Validators:

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors. Either the person(s) that have trained this model resulting in the original weights file (if this is the initial weights entry, i.e. it does not have a `parent`), or the person(s) who have converted the weights to this weights format (if this is a child weight, i.e. it has a `parent` field).

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`, the `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The multi-file weights. All required files/folders should be a zip archive.

tensorflow_version pydantic-field ¤

tensorflow_version: Optional[Version] = None

Version of the TensorFlow library used.

type class-attribute ¤

type: WeightsFormat = 'tensorflow_js'

weights_format_name class-attribute ¤

weights_format_name: str = 'Tensorflow.js'

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be it's own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

TensorflowSavedModelBundleWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "tensorflow_version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Version of the TensorFlow library used."
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_4.TensorflowSavedModelBundleWeightsDescr",
  "type": "object"
}

Fields:

Validators:

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors. Either the person(s) that have trained this model resulting in the original weights file (if this is the initial weights entry, i.e. it does not have a `parent`), or the person(s) who have converted the weights to this weights format (if this is a child weight, i.e. it has a `parent` field).

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`, the `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The weights file.

tensorflow_version pydantic-field ¤

tensorflow_version: Optional[Version] = None

Version of the TensorFlow library used.

type class-attribute ¤

type: WeightsFormat = 'tensorflow_saved_model_bundle'

weights_format_name class-attribute ¤

weights_format_name: str = 'Tensorflow Saved Model'

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be it's own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

TorchscriptWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "pytorch_version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Version of the PyTorch library used."
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_4.TorchscriptWeightsDescr",
  "type": "object"
}

Fields:

Validators:

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors: Either the person(s) that have trained this model, resulting in the original weights file (if this is the initial weights entry, i.e. it does not have a parent), or the person(s) who have converted the weights to this weights format (if this is a child weight, i.e. it has a parent field).

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

pytorch_version pydantic-field ¤

pytorch_version: Optional[Version] = None

Version of the PyTorch library used.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The weights file.

type class-attribute ¤

type: WeightsFormat = 'torchscript'

weights_format_name class-attribute ¤

weights_format_name: str = 'TorchScript'

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be it's own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

WeightsDescr pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "KerasHdf5WeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "tensorflow_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "TensorFlow version used to create these weights"
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.KerasHdf5WeightsDescr",
      "type": "object"
    },
    "OnnxWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "opset_version": {
          "anyOf": [
            {
              "minimum": 7,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "ONNX opset version",
          "title": "Opset Version"
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.OnnxWeightsDescr",
      "type": "object"
    },
    "PytorchStateDictWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "architecture": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "CallableFromFile",
              "type": "string"
            },
            {
              "pattern": "^.+\\..+$",
              "title": "CallableFromDepencency",
              "type": "string"
            }
          ],
          "description": "callable returning a torch.nn.Module instance.\nLocal implementation: `<relative path to file>:<identifier of implementation within the file>`.\nImplementation in a dependency: `<dependency-package>.<[dependency-module]>.<identifier>`.",
          "examples": [
            "my_function.py:MyNetworkClass",
            "my_module.submodule.get_my_model"
          ],
          "title": "Architecture"
        },
        "architecture_sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The SHA256 of the architecture source file, if the architecture is not defined in a module listed in `dependencies`\nYou can drag and drop your file to this\n[online tool](http://emn178.github.io/online-tools/sha256_checksum.html) to generate a SHA256 in your browser.\nOr you can generate a SHA256 checksum with Python's `hashlib`,\n[here is a codesnippet](https://gist.github.com/FynnBe/e64460463df89439cff218bbf59c1100).",
          "title": "Architecture Sha256"
        },
        "kwargs": {
          "additionalProperties": true,
          "description": "key word arguments for the `architecture` callable",
          "title": "Kwargs",
          "type": "object"
        },
        "pytorch_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the PyTorch library used.\nIf `depencencies` is specified it should include pytorch and the verison has to match.\n(`dependencies` overrules `pytorch_version`)"
        }
      },
      "required": [
        "source",
        "architecture"
      ],
      "title": "model.v0_4.PytorchStateDictWeightsDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "TensorflowJsWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "tensorflow_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the TensorFlow library used."
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.TensorflowJsWeightsDescr",
      "type": "object"
    },
    "TensorflowSavedModelBundleWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "tensorflow_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the TensorFlow library used."
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.TensorflowSavedModelBundleWeightsDescr",
      "type": "object"
    },
    "TorchscriptWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Attachments that are specific to this weights entry."
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "dependencies": {
          "anyOf": [
            {
              "pattern": "^.+:.+$",
              "title": "Dependencies",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
          "examples": [
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt"
          ],
          "title": "Dependencies"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "pytorch_version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Version of the PyTorch library used."
        }
      },
      "required": [
        "source"
      ],
      "title": "model.v0_4.TorchscriptWeightsDescr",
      "type": "object"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "keras_hdf5": {
      "anyOf": [
        {
          "$ref": "#/$defs/KerasHdf5WeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "onnx": {
      "anyOf": [
        {
          "$ref": "#/$defs/OnnxWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "pytorch_state_dict": {
      "anyOf": [
        {
          "$ref": "#/$defs/PytorchStateDictWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "tensorflow_js": {
      "anyOf": [
        {
          "$ref": "#/$defs/TensorflowJsWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "tensorflow_saved_model_bundle": {
      "anyOf": [
        {
          "$ref": "#/$defs/TensorflowSavedModelBundleWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "torchscript": {
      "anyOf": [
        {
          "$ref": "#/$defs/TorchscriptWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    }
  },
  "title": "model.v0_4.WeightsDescr",
  "type": "object"
}

Fields:

Validators:

available_formats property ¤

available_formats

keras_hdf5 pydantic-field ¤

keras_hdf5: Optional[KerasHdf5WeightsDescr] = None

missing_formats property ¤

missing_formats

onnx pydantic-field ¤

onnx: Optional[OnnxWeightsDescr] = None

pytorch_state_dict pydantic-field ¤

pytorch_state_dict: Optional[
    PytorchStateDictWeightsDescr
] = None

tensorflow_js pydantic-field ¤

tensorflow_js: Optional[TensorflowJsWeightsDescr] = None

tensorflow_saved_model_bundle pydantic-field ¤

tensorflow_saved_model_bundle: Optional[
    TensorflowSavedModelBundleWeightsDescr
] = None

torchscript pydantic-field ¤

torchscript: Optional[TorchscriptWeightsDescr] = None

__getitem__ ¤

__getitem__(key: WeightsFormat)
Source code in src/bioimageio/spec/model/v0_4.py
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
def __getitem__(
    self,
    key: WeightsFormat,
):
    if key == "keras_hdf5":
        ret = self.keras_hdf5
    elif key == "onnx":
        ret = self.onnx
    elif key == "pytorch_state_dict":
        ret = self.pytorch_state_dict
    elif key == "tensorflow_js":
        ret = self.tensorflow_js
    elif key == "tensorflow_saved_model_bundle":
        ret = self.tensorflow_saved_model_bundle
    elif key == "torchscript":
        ret = self.torchscript
    else:
        raise KeyError(key)

    if ret is None:
        raise KeyError(key)

    return ret

check_one_entry pydantic-validator ¤

check_one_entry() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
@model_validator(mode="after")
def check_one_entry(self) -> Self:
    if all(
        entry is None
        for entry in [
            self.keras_hdf5,
            self.onnx,
            self.pytorch_state_dict,
            self.tensorflow_js,
            self.tensorflow_saved_model_bundle,
            self.torchscript,
        ]
    ):
        raise ValueError("Missing weights entry")

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

WeightsEntryDescrBase pydantic-model ¤

Bases: FileDescr

Show JSON schema:
{
  "$defs": {
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "attachments": {
      "anyOf": [
        {
          "$ref": "#/$defs/AttachmentsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Attachments that are specific to this weights entry."
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "dependencies": {
      "anyOf": [
        {
          "pattern": "^.+:.+$",
          "title": "Dependencies",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`.",
      "examples": [
        "conda:environment.yaml",
        "maven:./pom.xml",
        "pip:./requirements.txt"
      ],
      "title": "Dependencies"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_4.WeightsEntryDescrBase",
  "type": "object"
}

Fields:

  • sha256 (Optional[Sha256])
  • source (FileSource_)
  • attachments (Annotated[Union[AttachmentsDescr, None], warn(None, 'Weights entry depends on additional attachments.', ALERT)])
  • authors (Union[List[Author], None])
  • dependencies (Annotated[Optional[Dependencies], warn(None, 'Custom dependencies ({value}) specified. Avoid this whenever possible ' + 'to allow execution in a wider range of software environments.'), Field(examples=['conda:environment.yaml', 'maven:./pom.xml', 'pip:./requirements.txt'])])
  • parent (Annotated[Optional[WeightsFormat], Field(examples=['pytorch_state_dict'])])

Validators:

attachments pydantic-field ¤

attachments: Annotated[
    Union[AttachmentsDescr, None],
    warn(
        None,
        "Weights entry depends on additional attachments.",
        ALERT,
    ),
] = None

Attachments that are specific to this weights entry.

authors pydantic-field ¤

authors: Union[List[Author], None] = None

Authors: Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

dependencies pydantic-field ¤

dependencies: Annotated[
    Optional[Dependencies],
    warn(
        None,
        "Custom dependencies ({value}) specified. Avoid this whenever possible "
        + "to allow execution in a wider range of software environments.",
    ),
    Field(
        examples=[
            "conda:environment.yaml",
            "maven:./pom.xml",
            "pip:./requirements.txt",
        ]
    ),
] = None

Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: FileSource_

The weights file.

type class-attribute ¤

weights_format_name class-attribute ¤

weights_format_name: str

check_parent_is_not_self pydantic-validator ¤

check_parent_is_not_self() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
288
289
290
291
292
293
@model_validator(mode="after")
def check_parent_is_not_self(self) -> Self:
    if self.type == self.parent:
        raise ValueError("Weights entry can't be it's own parent.")

    return self

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

ZeroMeanUnitVarianceDescr pydantic-model ¤

Bases: ProcessingDescrBase

Subtract mean and divide by variance.

Show JSON schema:
{
  "$defs": {
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "mode": {
          "default": "fixed",
          "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n|   fixed     | Fixed values for mean and variance   |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
          "enum": [
            "fixed",
            "per_dataset",
            "per_sample"
          ],
          "title": "Mode",
          "type": "string"
        },
        "axes": {
          "description": "The subset of axes to normalize jointly.\nFor example `xy` to normalize the two image axes for 2d data jointly.",
          "examples": [
            "xy"
          ],
          "title": "Axes",
          "type": "string"
        },
        "mean": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The mean value(s) to use for `mode: fixed`.\nFor example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`.",
          "examples": [
            [
              1.1,
              2.2,
              3.3
            ]
          ],
          "title": "Mean"
        },
        "std": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The standard deviation values to use for `mode: fixed`. Analogous to mean.",
          "examples": [
            [
              0.1,
              0.2,
              0.3
            ]
          ],
          "title": "Std"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "axes"
      ],
      "title": "model.v0_4.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Subtract mean and divide by variance.",
  "properties": {
    "name": {
      "const": "zero_mean_unit_variance",
      "title": "Name",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
    }
  },
  "required": [
    "name",
    "kwargs"
  ],
  "title": "model.v0_4.ZeroMeanUnitVarianceDescr",
  "type": "object"
}

Fields:

implemented_name class-attribute ¤

implemented_name: Literal["zero_mean_unit_variance"] = (
    "zero_mean_unit_variance"
)

kwargs pydantic-field ¤

name pydantic-field ¤

name: Literal["zero_mean_unit_variance"] = (
    "zero_mean_unit_variance"
)

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ZeroMeanUnitVarianceKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ZeroMeanUnitVarianceDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
  "properties": {
    "mode": {
      "default": "fixed",
      "description": "Mode for computing mean and variance.\n|     mode    |             description              |\n| ----------- | ------------------------------------ |\n|   fixed     | Fixed values for mean and variance   |\n| per_dataset | Compute for the entire dataset       |\n| per_sample  | Compute for each sample individually |",
      "enum": [
        "fixed",
        "per_dataset",
        "per_sample"
      ],
      "title": "Mode",
      "type": "string"
    },
    "axes": {
      "description": "The subset of axes to normalize jointly.\nFor example `xy` to normalize the two image axes for 2d data jointly.",
      "examples": [
        "xy"
      ],
      "title": "Axes",
      "type": "string"
    },
    "mean": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The mean value(s) to use for `mode: fixed`.\nFor example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`.",
      "examples": [
        [
          1.1,
          2.2,
          3.3
        ]
      ],
      "title": "Mean"
    },
    "std": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The standard deviation values to use for `mode: fixed`. Analogous to mean.",
      "examples": [
        [
          0.1,
          0.2,
          0.3
        ]
      ],
      "title": "Std"
    },
    "eps": {
      "default": 1e-06,
      "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
      "exclusiveMinimum": 0,
      "maximum": 0.1,
      "title": "Eps",
      "type": "number"
    }
  },
  "required": [
    "axes"
  ],
  "title": "model.v0_4.ZeroMeanUnitVarianceKwargs",
  "type": "object"
}

Fields:

  • mode (Literal['fixed', 'per_dataset', 'per_sample'])
  • axes (Annotated[AxesInCZYX, Field(examples=['xy'])])
  • mean (Annotated[Union[float, NotEmpty[List[float]], None], Field(examples=[(1.1, 2.2, 3.3)])])
  • std (Annotated[Union[float, NotEmpty[List[float]], None], Field(examples=[(0.1, 0.2, 0.3)])])
  • eps (Annotated[float, Interval(gt=0, le=0.1)])

Validators:

axes pydantic-field ¤

axes: Annotated[AxesInCZYX, Field(examples=['xy'])]

The subset of axes to normalize jointly. For example xy to normalize the two image axes for 2d data jointly.

eps pydantic-field ¤

eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-06

epsilon for numeric stability: out = (tensor - mean) / (std + eps).

mean pydantic-field ¤

mean: Annotated[
    Union[float, NotEmpty[List[float]], None],
    Field(examples=[(1.1, 2.2, 3.3)]),
] = None

The mean value(s) to use for mode: fixed. For example [1.1, 2.2, 3.3] in the case of a 3 channel image with axes: xy.

mode pydantic-field ¤

mode: Literal["fixed", "per_dataset", "per_sample"] = (
    "fixed"
)

Mode for computing mean and variance.

| mode        | description                          |
| ----------- | ------------------------------------ |
| fixed       | Fixed values for mean and variance   |
| per_dataset | Compute for the entire dataset       |
| per_sample  | Compute for each sample individually |

std pydantic-field ¤

std: Annotated[
    Union[float, NotEmpty[List[float]], None],
    Field(examples=[(0.1, 0.2, 0.3)]),
] = None

The standard deviation values to use for mode: fixed. Analogous to mean.

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

mean_and_std_match_mode pydantic-validator ¤

mean_and_std_match_mode() -> Self
Source code in src/bioimageio/spec/model/v0_4.py
767
768
769
770
771
772
773
774
@model_validator(mode="after")
def mean_and_std_match_mode(self) -> Self:
    if self.mode == "fixed" and (self.mean is None or self.std is None):
        raise ValueError("`mean` and `std` are required for `mode: fixed`.")
    elif self.mode != "fixed" and (self.mean is not None or self.std is not None):
        raise ValueError(f"`mean` and `std` not allowed for `mode: {self.mode}`")

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

package_weights ¤

package_weights(
    value: Node,
    handler: SerializerFunctionWrapHandler,
    info: SerializationInfo,
)
Source code in src/bioimageio/spec/model/v0_4.py
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
def package_weights(
    value: Node,  # Union[v0_4.WeightsDescr, v0_5.WeightsDescr]
    handler: SerializerFunctionWrapHandler,
    info: SerializationInfo,
):
    ctxt = packaging_context_var.get()
    if ctxt is not None and ctxt.weights_priority_order is not None:
        for wf in ctxt.weights_priority_order:
            w = getattr(value, wf, None)
            if w is not None:
                break
        else:
            raise ValueError(
                "None of the weight formats in `weights_priority_order`"
                + f" ({ctxt.weights_priority_order}) is present in the given model."
            )

        assert isinstance(w, Node), type(w)
        # construct WeightsDescr with new single weight format entry
        new_w = w.model_construct(**{k: v for k, v in w if k != "parent"})
        value = value.model_construct(None, **{wf: new_w})

    return handler(
        value,
        info,  # pyright: ignore[reportArgumentType]  # taken from pydantic docs
    )

v0_5 ¤

Classes:

Functions:

Attributes:

ANY_AXIS_TYPES module-attribute ¤

intended for isinstance comparisons in py<3.10

AnyAxis module-attribute ¤

AnyAxis = Union[InputAxis, OutputAxis]

AxisType module-attribute ¤

AxisType = Literal[
    "batch", "channel", "index", "time", "space"
]

BATCH_AXIS_ID module-attribute ¤

BATCH_AXIS_ID = AxisId('batch')

FileDescr_dependencies module-attribute ¤

FileDescr_dependencies = Annotated[
    FileDescr_,
    WithSuffix((".yaml", ".yml"), case_sensitive=True),
    Field(examples=[dict(source="environment.yaml")]),
]

FileDescr_external_data module-attribute ¤

FileDescr_external_data = Annotated[
    FileDescr_,
    WithSuffix(".data", case_sensitive=True),
    Field(examples=[dict(source="weights.onnx.data")]),
]

INPUT_AXIS_TYPES module-attribute ¤

intended for isinstance comparisons in py<3.10

IO_AxisT module-attribute ¤

IO_AxisT = TypeVar('IO_AxisT', InputAxis, OutputAxis)

InputAxis module-attribute ¤

InputAxis = Annotated[
    _InputAxisUnion, Discriminator("type")
]

IntervalOrRatioDType module-attribute ¤

IntervalOrRatioDType = Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
]

NominalOrOrdinalDType module-attribute ¤

NominalOrOrdinalDType = Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
    "bool",
]

OUTPUT_AXIS_TYPES module-attribute ¤

intended for isinstance comparisons in py<3.10

OutputAxis module-attribute ¤

OutputAxis = Annotated[
    _OutputAxisUnion, Discriminator("type")
]

ParameterizedSize_N module-attribute ¤

ParameterizedSize_N = int

Annotates an integer to calculate a concrete axis size from a ParameterizedSize.

PostprocessingDescr module-attribute ¤

PostprocessingId module-attribute ¤

PostprocessingId = Literal[
    "binarize",
    "clip",
    "ensure_dtype",
    "fixed_zero_mean_unit_variance",
    "scale_linear",
    "scale_mean_variance",
    "scale_range",
    "sigmoid",
    "softmax",
    "zero_mean_unit_variance",
]

PreprocessingDescr module-attribute ¤

PreprocessingId module-attribute ¤

PreprocessingId = Literal[
    "binarize",
    "clip",
    "ensure_dtype",
    "fixed_zero_mean_unit_variance",
    "scale_linear",
    "scale_range",
    "sigmoid",
    "softmax",
]

SAME_AS_TYPE module-attribute ¤

SAME_AS_TYPE = '<same as type>'

SpaceUnit module-attribute ¤

SpaceUnit = Literal[
    "attometer",
    "angstrom",
    "centimeter",
    "decimeter",
    "exameter",
    "femtometer",
    "foot",
    "gigameter",
    "hectometer",
    "inch",
    "kilometer",
    "megameter",
    "meter",
    "micrometer",
    "mile",
    "millimeter",
    "nanometer",
    "parsec",
    "petameter",
    "picometer",
    "terameter",
    "yard",
    "yoctometer",
    "yottameter",
    "zeptometer",
    "zettameter",
]

TVs module-attribute ¤

TVs = Union[
    NotEmpty[List[int]],
    NotEmpty[List[float]],
    NotEmpty[List[bool]],
    NotEmpty[List[str]],
]

TensorDataDescr module-attribute ¤

TensorDescr module-attribute ¤

TensorDescr = Union[InputTensorDescr, OutputTensorDescr]

TimeUnit module-attribute ¤

TimeUnit = Literal[
    "attosecond",
    "centisecond",
    "day",
    "decisecond",
    "exasecond",
    "femtosecond",
    "gigasecond",
    "hectosecond",
    "hour",
    "kilosecond",
    "megasecond",
    "microsecond",
    "millisecond",
    "minute",
    "nanosecond",
    "petasecond",
    "picosecond",
    "second",
    "terasecond",
    "yoctosecond",
    "yottasecond",
    "zeptosecond",
    "zettasecond",
]

ArchitectureFromFileDescr pydantic-model ¤

Bases: _ArchitectureCallableDescr, FileDescr

Show JSON schema:
{
  "$defs": {
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "YamlValue": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "format": "date",
          "type": "string"
        },
        {
          "format": "date-time",
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        },
        {
          "type": "string"
        },
        {
          "items": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "array"
        },
        {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "object"
        },
        {
          "type": "null"
        }
      ]
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Architecture source file",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "callable": {
      "description": "Identifier of the callable that returns a torch.nn.Module instance.",
      "examples": [
        "MyNetworkClass",
        "get_my_model"
      ],
      "minLength": 1,
      "title": "Identifier",
      "type": "string"
    },
    "kwargs": {
      "additionalProperties": {
        "$ref": "#/$defs/YamlValue"
      },
      "description": "key word arguments for the `callable`",
      "title": "Kwargs",
      "type": "object"
    }
  },
  "required": [
    "source",
    "callable"
  ],
  "title": "model.v0_5.ArchitectureFromFileDescr",
  "type": "object"
}

Fields:

callable pydantic-field ¤

callable: Annotated[
    Identifier,
    Field(examples=["MyNetworkClass", "get_my_model"]),
]

Identifier of the callable that returns a torch.nn.Module instance.

kwargs pydantic-field ¤

kwargs: Dict[str, YamlValue]

key word arguments for the callable

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

Architecture source file

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

ArchitectureFromLibraryDescr pydantic-model ¤

Bases: _ArchitectureCallableDescr

Show JSON schema:
{
  "$defs": {
    "YamlValue": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "format": "date",
          "type": "string"
        },
        {
          "format": "date-time",
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        },
        {
          "type": "string"
        },
        {
          "items": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "array"
        },
        {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "object"
        },
        {
          "type": "null"
        }
      ]
    }
  },
  "additionalProperties": false,
  "properties": {
    "callable": {
      "description": "Identifier of the callable that returns a torch.nn.Module instance.",
      "examples": [
        "MyNetworkClass",
        "get_my_model"
      ],
      "minLength": 1,
      "title": "Identifier",
      "type": "string"
    },
    "kwargs": {
      "additionalProperties": {
        "$ref": "#/$defs/YamlValue"
      },
      "description": "key word arguments for the `callable`",
      "title": "Kwargs",
      "type": "object"
    },
    "import_from": {
      "description": "Where to import the callable from, i.e. `from <import_from> import <callable>`",
      "title": "Import From",
      "type": "string"
    }
  },
  "required": [
    "callable",
    "import_from"
  ],
  "title": "model.v0_5.ArchitectureFromLibraryDescr",
  "type": "object"
}

Fields:

callable pydantic-field ¤

callable: Annotated[
    Identifier,
    Field(examples=["MyNetworkClass", "get_my_model"]),
]

Identifier of the callable that returns a torch.nn.Module instance.

import_from pydantic-field ¤

import_from: str

Where to import the callable from, i.e. from <import_from> import <callable>

kwargs pydantic-field ¤

kwargs: Dict[str, YamlValue]

key word arguments for the callable

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

AxisBase pydantic-model ¤

Bases: NodeWithExplicitlySetFields

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "id": {
      "description": "An axis id unique across all axes of one tensor.",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_5.AxisBase",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

id: AxisId

An axis id unique across all axes of one tensor.

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

AxisId ¤

Bases: LowerCaseIdentifier


              flowchart TD
              bioimageio.spec.model.v0_5.AxisId[AxisId]
              bioimageio.spec._internal.types.LowerCaseIdentifier[LowerCaseIdentifier]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec._internal.types.LowerCaseIdentifier --> bioimageio.spec.model.v0_5.AxisId
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec._internal.types.LowerCaseIdentifier
                



              click bioimageio.spec.model.v0_5.AxisId href "" "bioimageio.spec.model.v0_5.AxisId"
              click bioimageio.spec._internal.types.LowerCaseIdentifier href "" "bioimageio.spec._internal.types.LowerCaseIdentifier"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[
        LowerCaseIdentifierAnno,
        MaxLen(16),
        AfterValidator(_normalize_axis_id),
    ]
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated
    return self._after_validator()

BatchAxis pydantic-model ¤

Bases: AxisBase

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "batch",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "batch",
      "title": "Type",
      "type": "string"
    },
    "size": {
      "anyOf": [
        {
          "const": 1,
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The batch size may be fixed to 1,\notherwise (the default) it may be chosen arbitrarily depending on available memory",
      "title": "Size"
    }
  },
  "required": [
    "type"
  ],
  "title": "model.v0_5.BatchAxis",
  "type": "object"
}

Fields:

  • description (Annotated[str, MaxLen(128)])
  • type (Literal['batch'])
  • id (Annotated[AxisId, Predicate(_is_batch)])
  • size (Optional[Literal[1]])

concatenable property ¤

concatenable

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

id: Annotated[AxisId, Predicate(_is_batch)] = BATCH_AXIS_ID

implemented_type class-attribute ¤

implemented_type: Literal['batch'] = 'batch'

scale property ¤

scale

size pydantic-field ¤

size: Optional[Literal[1]] = None

The batch size may be fixed to 1, otherwise (the default) it may be chosen arbitrarily depending on available memory

type pydantic-field ¤

type: Literal['batch'] = 'batch'

unit property ¤

unit

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

BinarizeAlongAxisKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for BinarizeDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `BinarizeDescr`",
  "properties": {
    "threshold": {
      "description": "The fixed threshold values along `axis`",
      "items": {
        "type": "number"
      },
      "minItems": 1,
      "title": "Threshold",
      "type": "array"
    },
    "axis": {
      "description": "The `threshold` axis",
      "examples": [
        "channel"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    }
  },
  "required": [
    "threshold",
    "axis"
  ],
  "title": "model.v0_5.BinarizeAlongAxisKwargs",
  "type": "object"
}

Fields:

axis pydantic-field ¤

axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]

The threshold axis

threshold pydantic-field ¤

threshold: NotEmpty[List[float]]

The fixed threshold values along axis

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

BinarizeDescr pydantic-model ¤

Bases: ProcessingDescrBase

Binarize the tensor with a fixed threshold.

Values above BinarizeKwargs.threshold/BinarizeAlongAxisKwargs.threshold will be set to one, values below the threshold to zero.

Examples:

  • in YAML
    postprocessing:
      - id: binarize
        kwargs:
          axis: 'channel'
          threshold: [0.25, 0.5, 0.75]
    
  • in Python:
    >>> postprocessing = [BinarizeDescr(
    ...   kwargs=BinarizeAlongAxisKwargs(
    ...       axis=AxisId('channel'),
    ...       threshold=[0.25, 0.5, 0.75],
    ...   )
    ... )]
Show JSON schema:
{
  "$defs": {
    "BinarizeAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold values along `axis`",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Threshold",
          "type": "array"
        },
        "axis": {
          "description": "The `threshold` axis",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "threshold",
        "axis"
      ],
      "title": "model.v0_5.BinarizeAlongAxisKwargs",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_5.BinarizeKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Binarize the tensor with a fixed threshold.\n\nValues above `BinarizeKwargs.threshold`/`BinarizeAlongAxisKwargs.threshold`\nwill be set to one, values below the threshold to zero.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: binarize\n        kwargs:\n          axis: 'channel'\n          threshold: [0.25, 0.5, 0.75]\n    ```\n- in Python:\n    >>> postprocessing = [BinarizeDescr(\n    ...   kwargs=BinarizeAlongAxisKwargs(\n    ...       axis=AxisId('channel'),\n    ...       threshold=[0.25, 0.5, 0.75],\n    ...   )\n    ... )]",
  "properties": {
    "id": {
      "const": "binarize",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "anyOf": [
        {
          "$ref": "#/$defs/BinarizeKwargs"
        },
        {
          "$ref": "#/$defs/BinarizeAlongAxisKwargs"
        }
      ],
      "title": "Kwargs"
    }
  },
  "required": [
    "id",
    "kwargs"
  ],
  "title": "model.v0_5.BinarizeDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['binarize'] = 'binarize'

implemented_id class-attribute ¤

implemented_id: Literal['binarize'] = 'binarize'

kwargs pydantic-field ¤

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

BinarizeKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for BinarizeDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `BinarizeDescr`",
  "properties": {
    "threshold": {
      "description": "The fixed threshold",
      "title": "Threshold",
      "type": "number"
    }
  },
  "required": [
    "threshold"
  ],
  "title": "model.v0_5.BinarizeKwargs",
  "type": "object"
}

Fields:

threshold pydantic-field ¤

threshold: float

The fixed threshold

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

BioimageioConfig pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "ReproducibilityTolerance": {
      "additionalProperties": true,
      "description": "Describes what small numerical differences -- if any -- may be tolerated\nin the generated output when executing in different environments.\n\nA tensor element *output* is considered mismatched to the **test_tensor** if\nabs(*output* - **test_tensor**) > **absolute_tolerance** + **relative_tolerance** * abs(**test_tensor**).\n(Internally we call [numpy.testing.assert_allclose](https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_allclose.html).)\n\nMotivation:\n    For testing we can request the respective deep learning frameworks to be as\n    reproducible as possible by setting seeds and chosing deterministic algorithms,\n    but differences in operating systems, available hardware and installed drivers\n    may still lead to numerical differences.",
      "properties": {
        "relative_tolerance": {
          "default": 0.001,
          "description": "Maximum relative tolerance of reproduced test tensor.",
          "maximum": 0.01,
          "minimum": 0,
          "title": "Relative Tolerance",
          "type": "number"
        },
        "absolute_tolerance": {
          "default": 0.0001,
          "description": "Maximum absolute tolerance of reproduced test tensor.",
          "minimum": 0,
          "title": "Absolute Tolerance",
          "type": "number"
        },
        "mismatched_elements_per_million": {
          "default": 100,
          "description": "Maximum number of mismatched elements/pixels per million to tolerate.",
          "maximum": 1000,
          "minimum": 0,
          "title": "Mismatched Elements Per Million",
          "type": "integer"
        },
        "output_ids": {
          "default": [],
          "description": "Limits the output tensor IDs these reproducibility details apply to.",
          "items": {
            "maxLength": 32,
            "minLength": 1,
            "title": "TensorId",
            "type": "string"
          },
          "title": "Output Ids",
          "type": "array"
        },
        "weights_formats": {
          "default": [],
          "description": "Limits the weights formats these details apply to.",
          "items": {
            "enum": [
              "keras_hdf5",
              "onnx",
              "pytorch_state_dict",
              "tensorflow_js",
              "tensorflow_saved_model_bundle",
              "torchscript"
            ],
            "type": "string"
          },
          "title": "Weights Formats",
          "type": "array"
        }
      },
      "title": "model.v0_5.ReproducibilityTolerance",
      "type": "object"
    }
  },
  "additionalProperties": true,
  "properties": {
    "reproducibility_tolerance": {
      "default": [],
      "description": "Tolerances to allow when reproducing the model's test outputs\nfrom the model's test inputs.\nOnly the first entry matching tensor id and weights format is considered.",
      "items": {
        "$ref": "#/$defs/ReproducibilityTolerance"
      },
      "title": "Reproducibility Tolerance",
      "type": "array"
    }
  },
  "title": "model.v0_5.BioimageioConfig",
  "type": "object"
}

Fields:

reproducibility_tolerance pydantic-field ¤

reproducibility_tolerance: Sequence[
    ReproducibilityTolerance
] = ()

Tolerances to allow when reproducing the model's test outputs from the model's test inputs. Only the first entry matching tensor id and weights format is considered.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ChannelAxis pydantic-model ¤

Bases: AxisBase

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "channel",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "channel",
      "title": "Type",
      "type": "string"
    },
    "channel_names": {
      "items": {
        "minLength": 1,
        "title": "Identifier",
        "type": "string"
      },
      "minItems": 1,
      "title": "Channel Names",
      "type": "array"
    }
  },
  "required": [
    "type",
    "channel_names"
  ],
  "title": "model.v0_5.ChannelAxis",
  "type": "object"
}

Fields:

channel_names pydantic-field ¤

channel_names: NotEmpty[List[Identifier]]

concatenable property ¤

concatenable

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['channel'] = 'channel'

scale property ¤

scale: float

size property ¤

size: int

type pydantic-field ¤

type: Literal['channel'] = 'channel'

unit property ¤

unit

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ClipDescr pydantic-model ¤

Bases: ProcessingDescrBase

Set tensor values below min to min and above max to max.

See ScaleRangeDescr for examples.

Show JSON schema:
{
  "$defs": {
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Set tensor values below min to min and above max to max.\n\nSee `ScaleRangeDescr` for examples.",
  "properties": {
    "id": {
      "const": "clip",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ClipKwargs"
    }
  },
  "required": [
    "id",
    "kwargs"
  ],
  "title": "model.v0_5.ClipDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['clip'] = 'clip'

implemented_id class-attribute ¤

implemented_id: Literal['clip'] = 'clip'

kwargs pydantic-field ¤

kwargs: ClipKwargs

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

Config pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "BioimageioConfig": {
      "additionalProperties": true,
      "properties": {
        "reproducibility_tolerance": {
          "default": [],
          "description": "Tolerances to allow when reproducing the model's test outputs\nfrom the model's test inputs.\nOnly the first entry matching tensor id and weights format is considered.",
          "items": {
            "$ref": "#/$defs/ReproducibilityTolerance"
          },
          "title": "Reproducibility Tolerance",
          "type": "array"
        }
      },
      "title": "model.v0_5.BioimageioConfig",
      "type": "object"
    },
    "ReproducibilityTolerance": {
      "additionalProperties": true,
      "description": "Describes what small numerical differences -- if any -- may be tolerated\nin the generated output when executing in different environments.\n\nA tensor element *output* is considered mismatched to the **test_tensor** if\nabs(*output* - **test_tensor**) > **absolute_tolerance** + **relative_tolerance** * abs(**test_tensor**).\n(Internally we call [numpy.testing.assert_allclose](https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_allclose.html).)\n\nMotivation:\n    For testing we can request the respective deep learning frameworks to be as\n    reproducible as possible by setting seeds and chosing deterministic algorithms,\n    but differences in operating systems, available hardware and installed drivers\n    may still lead to numerical differences.",
      "properties": {
        "relative_tolerance": {
          "default": 0.001,
          "description": "Maximum relative tolerance of reproduced test tensor.",
          "maximum": 0.01,
          "minimum": 0,
          "title": "Relative Tolerance",
          "type": "number"
        },
        "absolute_tolerance": {
          "default": 0.0001,
          "description": "Maximum absolute tolerance of reproduced test tensor.",
          "minimum": 0,
          "title": "Absolute Tolerance",
          "type": "number"
        },
        "mismatched_elements_per_million": {
          "default": 100,
          "description": "Maximum number of mismatched elements/pixels per million to tolerate.",
          "maximum": 1000,
          "minimum": 0,
          "title": "Mismatched Elements Per Million",
          "type": "integer"
        },
        "output_ids": {
          "default": [],
          "description": "Limits the output tensor IDs these reproducibility details apply to.",
          "items": {
            "maxLength": 32,
            "minLength": 1,
            "title": "TensorId",
            "type": "string"
          },
          "title": "Output Ids",
          "type": "array"
        },
        "weights_formats": {
          "default": [],
          "description": "Limits the weights formats these details apply to.",
          "items": {
            "enum": [
              "keras_hdf5",
              "onnx",
              "pytorch_state_dict",
              "tensorflow_js",
              "tensorflow_saved_model_bundle",
              "torchscript"
            ],
            "type": "string"
          },
          "title": "Weights Formats",
          "type": "array"
        }
      },
      "title": "model.v0_5.ReproducibilityTolerance",
      "type": "object"
    }
  },
  "additionalProperties": true,
  "properties": {
    "bioimageio": {
      "$ref": "#/$defs/BioimageioConfig"
    }
  },
  "title": "model.v0_5.Config",
  "type": "object"
}

Fields:

bioimageio pydantic-field ¤

bioimageio: BioimageioConfig

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

DataDependentSize pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "min": {
      "default": 1,
      "exclusiveMinimum": 0,
      "title": "Min",
      "type": "integer"
    },
    "max": {
      "anyOf": [
        {
          "exclusiveMinimum": 1,
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Max"
    }
  },
  "title": "model.v0_5.DataDependentSize",
  "type": "object"
}

Fields:

  • min (Annotated[int, Gt(0)])
  • max (Annotated[Optional[int], Gt(1)])

Validators:

  • _validate_max_gt_min

max pydantic-field ¤

max: Annotated[Optional[int], Gt(1)] = None

min pydantic-field ¤

min: Annotated[int, Gt(0)] = 1

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_size ¤

validate_size(size: int) -> int
Source code in src/bioimageio/spec/model/v0_5.py
343
344
345
346
347
348
349
350
def validate_size(self, size: int) -> int:
    if size < self.min:
        raise ValueError(f"size {size} < {self.min}")

    if self.max is not None and size > self.max:
        raise ValueError(f"size {size} > {self.max}")

    return size

EnsureDtypeDescr pydantic-model ¤

Bases: ProcessingDescrBase

Cast the tensor data type to EnsureDtypeKwargs.dtype (if not matching).

This can for example be used to ensure the inner neural network model gets a different input tensor data type than the fully described bioimage.io model does.

Examples:

The described bioimage.io model (incl. preprocessing) accepts any float32-compatible tensor, normalizes it with percentiles and clipping and then casts it to uint8, which is what the neural network in this example expects.

- in YAML

inputs:
- data:
    type: float32  # described bioimage.io model is compatible with any float32 input tensor
  preprocessing:
  - id: scale_range
      kwargs:
      axes: ['y', 'x']
      max_percentile: 99.8
      min_percentile: 5.0
  - id: clip
      kwargs:
      min: 0.0
      max: 1.0
  - id: ensure_dtype  # the neural network of the model requires uint8
      kwargs:
      dtype: uint8
- in Python:

  >>> preprocessing = [
  ...     ScaleRangeDescr(
  ...         kwargs=ScaleRangeKwargs(
  ...           axes= (AxisId('y'), AxisId('x')),
  ...           max_percentile= 99.8,
  ...           min_percentile= 5.0,
  ...         )
  ...     ),
  ...     ClipDescr(kwargs=ClipKwargs(min=0.0, max=1.0)),
  ...     EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype="uint8")),
  ... ]

Show JSON schema:
{
  "$defs": {
    "EnsureDtypeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `EnsureDtypeDescr`",
      "properties": {
        "dtype": {
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "title": "Dtype",
          "type": "string"
        }
      },
      "required": [
        "dtype"
      ],
      "title": "model.v0_5.EnsureDtypeKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Cast the tensor data type to `EnsureDtypeKwargs.dtype` (if not matching).\n\nThis can for example be used to ensure the inner neural network model gets a\ndifferent input tensor data type than the fully described bioimage.io model does.\n\nExamples:\n    The described bioimage.io model (incl. preprocessing) accepts any\n    float32-compatible tensor, normalizes it with percentiles and clipping and then\n    casts it to uint8, which is what the neural network in this example expects.\n    - in YAML\n        ```yaml\n        inputs:\n        - data:\n            type: float32  # described bioimage.io model is compatible with any float32 input tensor\n          preprocessing:\n          - id: scale_range\n              kwargs:\n              axes: ['y', 'x']\n              max_percentile: 99.8\n              min_percentile: 5.0\n          - id: clip\n              kwargs:\n              min: 0.0\n              max: 1.0\n          - id: ensure_dtype  # the neural network of the model requires uint8\n              kwargs:\n              dtype: uint8\n        ```\n    - in Python:\n        >>> preprocessing = [\n        ...     ScaleRangeDescr(\n        ...         kwargs=ScaleRangeKwargs(\n        ...           axes= (AxisId('y'), AxisId('x')),\n        ...           max_percentile= 99.8,\n        ...           min_percentile= 5.0,\n        ...         )\n        ...     ),\n        ...     ClipDescr(kwargs=ClipKwargs(min=0.0, max=1.0)),\n        ...     EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=\"uint8\")),\n        ... ]",
  "properties": {
    "id": {
      "const": "ensure_dtype",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/EnsureDtypeKwargs"
    }
  },
  "required": [
    "id",
    "kwargs"
  ],
  "title": "model.v0_5.EnsureDtypeDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['ensure_dtype'] = 'ensure_dtype'

implemented_id class-attribute ¤

implemented_id: Literal['ensure_dtype'] = 'ensure_dtype'

kwargs pydantic-field ¤

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

EnsureDtypeKwargs pydantic-model ¤

Bases: ProcessingKwargs

keyword arguments for EnsureDtypeDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `EnsureDtypeDescr`",
  "properties": {
    "dtype": {
      "enum": [
        "float32",
        "float64",
        "uint8",
        "int8",
        "uint16",
        "int16",
        "uint32",
        "int32",
        "uint64",
        "int64",
        "bool"
      ],
      "title": "Dtype",
      "type": "string"
    }
  },
  "required": [
    "dtype"
  ],
  "title": "model.v0_5.EnsureDtypeKwargs",
  "type": "object"
}

Fields:

  • dtype (Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'bool'])

dtype pydantic-field ¤

dtype: Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
    "bool",
]

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

FixedZeroMeanUnitVarianceAlongAxisKwargs pydantic-model ¤

Bases: ProcessingKwargs

keyword arguments for FixedZeroMeanUnitVarianceDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
  "properties": {
    "mean": {
      "description": "The mean value(s) to normalize with.",
      "items": {
        "type": "number"
      },
      "minItems": 1,
      "title": "Mean",
      "type": "array"
    },
    "std": {
      "description": "The standard deviation value(s) to normalize with.\nSize must match `mean` values.",
      "items": {
        "minimum": 1e-06,
        "type": "number"
      },
      "minItems": 1,
      "title": "Std",
      "type": "array"
    },
    "axis": {
      "description": "The axis of the mean/std values to normalize each entry along that dimension\nseparately.",
      "examples": [
        "channel",
        "index"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    }
  },
  "required": [
    "mean",
    "std",
    "axis"
  ],
  "title": "model.v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs",
  "type": "object"
}

Fields:

  • mean (NotEmpty[List[float]])
  • std (NotEmpty[List[Annotated[float, Ge(1e-06)]]])
  • axis (Annotated[NonBatchAxisId, Field(examples=['channel', 'index'])])

Validators:

  • _mean_and_std_match

axis pydantic-field ¤

axis: Annotated[
    NonBatchAxisId, Field(examples=["channel", "index"])
]

The axis of the mean/std values to normalize each entry along that dimension separately.

mean pydantic-field ¤

mean: NotEmpty[List[float]]

The mean value(s) to normalize with.

std pydantic-field ¤

std: NotEmpty[List[Annotated[float, Ge(1e-06)]]]

The standard deviation value(s) to normalize with. Size must match mean values.

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

FixedZeroMeanUnitVarianceDescr pydantic-model ¤

Bases: ProcessingDescrBase

Subtract a given mean and divide by the standard deviation.

Normalize with fixed, precomputed values for FixedZeroMeanUnitVarianceKwargs.mean and FixedZeroMeanUnitVarianceKwargs.std. Use FixedZeroMeanUnitVarianceAlongAxisKwargs for independent scaling along given axes.

Examples:

  1. scalar value for whole tensor

    • in YAML
      preprocessing:
        - id: fixed_zero_mean_unit_variance
          kwargs:
            mean: 103.5
            std: 13.7
      
    • in Python

      >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(
      ...   kwargs=FixedZeroMeanUnitVarianceKwargs(mean=103.5, std=13.7)
      ... )]

  2. independently along an axis

    • in YAML
      preprocessing:
        - id: fixed_zero_mean_unit_variance
          kwargs:
            axis: channel
            mean: [101.5, 102.5, 103.5]
            std: [11.7, 12.7, 13.7]
      
    • in Python

      >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(
      ...   kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(
      ...     axis=AxisId("channel"),
      ...     mean=[101.5, 102.5, 103.5],
      ...     std=[11.7, 12.7, 13.7],
      ...   )
      ... )]

Show JSON schema:
{
  "$defs": {
    "FixedZeroMeanUnitVarianceAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value(s) to normalize with.",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Mean",
          "type": "array"
        },
        "std": {
          "description": "The standard deviation value(s) to normalize with.\nSize must match `mean` values.",
          "items": {
            "minimum": 1e-06,
            "type": "number"
          },
          "minItems": 1,
          "title": "Std",
          "type": "array"
        },
        "axis": {
          "description": "The axis of the mean/std values to normalize each entry along that dimension\nseparately.",
          "examples": [
            "channel",
            "index"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "mean",
        "std",
        "axis"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value to normalize with.",
          "title": "Mean",
          "type": "number"
        },
        "std": {
          "description": "The standard deviation value to normalize with.",
          "minimum": 1e-06,
          "title": "Std",
          "type": "number"
        }
      },
      "required": [
        "mean",
        "std"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Subtract a given mean and divide by the standard deviation.\n\nNormalize with fixed, precomputed values for\n`FixedZeroMeanUnitVarianceKwargs.mean` and `FixedZeroMeanUnitVarianceKwargs.std`\nUse `FixedZeroMeanUnitVarianceAlongAxisKwargs` for independent scaling along given\naxes.\n\nExamples:\n1. scalar value for whole tensor\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          mean: 103.5\n          std: 13.7\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceKwargs(mean=103.5, std=13.7)\n    ... )]\n\n2. independently along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          axis: channel\n          mean: [101.5, 102.5, 103.5]\n          std: [11.7, 12.7, 13.7]\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(\n    ...     axis=AxisId(\"channel\"),\n    ...     mean=[101.5, 102.5, 103.5],\n    ...     std=[11.7, 12.7, 13.7],\n    ...   )\n    ... )]",
  "properties": {
    "id": {
      "const": "fixed_zero_mean_unit_variance",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "anyOf": [
        {
          "$ref": "#/$defs/FixedZeroMeanUnitVarianceKwargs"
        },
        {
          "$ref": "#/$defs/FixedZeroMeanUnitVarianceAlongAxisKwargs"
        }
      ],
      "title": "Kwargs"
    }
  },
  "required": [
    "id",
    "kwargs"
  ],
  "title": "model.v0_5.FixedZeroMeanUnitVarianceDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal["fixed_zero_mean_unit_variance"] = (
    "fixed_zero_mean_unit_variance"
)

implemented_id class-attribute ¤

implemented_id: Literal["fixed_zero_mean_unit_variance"] = (
    "fixed_zero_mean_unit_variance"
)

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

FixedZeroMeanUnitVarianceKwargs pydantic-model ¤

Bases: ProcessingKwargs

keyword arguments for FixedZeroMeanUnitVarianceDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
  "properties": {
    "mean": {
      "description": "The mean value to normalize with.",
      "title": "Mean",
      "type": "number"
    },
    "std": {
      "description": "The standard deviation value to normalize with.",
      "minimum": 1e-06,
      "title": "Std",
      "type": "number"
    }
  },
  "required": [
    "mean",
    "std"
  ],
  "title": "model.v0_5.FixedZeroMeanUnitVarianceKwargs",
  "type": "object"
}

Fields:

  • mean (float)
  • std (Annotated[float, Ge(1e-06)])

mean pydantic-field ¤

mean: float

The mean value to normalize with.

std pydantic-field ¤

std: Annotated[float, Ge(1e-06)]

The standard deviation value to normalize with.

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

IndexAxisBase pydantic-model ¤

Bases: AxisBase

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "index",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "index",
      "title": "Type",
      "type": "string"
    }
  },
  "required": [
    "type"
  ],
  "title": "model.v0_5.IndexAxisBase",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['index'] = 'index'

scale property ¤

scale: float

type pydantic-field ¤

type: Literal['index'] = 'index'

unit property ¤

unit

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

IndexInputAxis pydantic-model ¤

Bases: IndexAxisBase, _WithInputAxisSize

Show JSON schema:
{
  "$defs": {
    "ParameterizedSize": {
      "additionalProperties": false,
      "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize parameters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize parameter n = 0,1,2,... results in a greater **size**.\n  This allows to adjust the axis size more generically.",
      "properties": {
        "min": {
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "step": {
          "exclusiveMinimum": 0,
          "title": "Step",
          "type": "integer"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_5.ParameterizedSize",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "size": {
      "anyOf": [
        {
          "exclusiveMinimum": 0,
          "type": "integer"
        },
        {
          "$ref": "#/$defs/ParameterizedSize"
        },
        {
          "$ref": "#/$defs/SizeReference"
        }
      ],
      "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
      "examples": [
        10,
        {
          "min": 32,
          "step": 16
        },
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ],
      "title": "Size"
    },
    "id": {
      "default": "index",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "index",
      "title": "Type",
      "type": "string"
    },
    "concatenable": {
      "default": false,
      "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
      "title": "Concatenable",
      "type": "boolean"
    }
  },
  "required": [
    "size",
    "type"
  ],
  "title": "model.v0_5.IndexInputAxis",
  "type": "object"
}

Fields:

concatenable pydantic-field ¤

concatenable: bool = False

If a model has a concatenable input axis, it can be processed blockwise, splitting a longer sample axis into blocks matching its input tensor description. Output axes are concatenable if they have a SizeReference to a concatenable input axis.

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['index'] = 'index'

scale property ¤

scale: float

size pydantic-field ¤

size: Annotated[
    Union[
        Annotated[int, Gt(0)],
        ParameterizedSize,
        SizeReference,
    ],
    Field(
        examples=[
            10,
            ParameterizedSize(min=32, step=16).model_dump(
                mode="json"
            ),
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

The size/length of this axis can be specified as - fixed integer - parameterized series of valid sizes (ParameterizedSize) - reference to another axis with an optional offset (SizeReference)

type pydantic-field ¤

type: Literal['index'] = 'index'

unit property ¤

unit

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

IndexOutputAxis pydantic-model ¤

Bases: IndexAxisBase

Show JSON schema:
{
  "$defs": {
    "DataDependentSize": {
      "additionalProperties": false,
      "properties": {
        "min": {
          "default": 1,
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "max": {
          "anyOf": [
            {
              "exclusiveMinimum": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Max"
        }
      },
      "title": "model.v0_5.DataDependentSize",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "index",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "index",
      "title": "Type",
      "type": "string"
    },
    "size": {
      "anyOf": [
        {
          "exclusiveMinimum": 0,
          "type": "integer"
        },
        {
          "$ref": "#/$defs/SizeReference"
        },
        {
          "$ref": "#/$defs/DataDependentSize"
        }
      ],
      "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (`SizeReference`)\n- data dependent size using `DataDependentSize` (size is only known after model inference)",
      "examples": [
        10,
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ],
      "title": "Size"
    }
  },
  "required": [
    "type",
    "size"
  ],
  "title": "model.v0_5.IndexOutputAxis",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['index'] = 'index'

scale property ¤

scale: float

size pydantic-field ¤

size: Annotated[
    Union[
        Annotated[int, Gt(0)],
        SizeReference,
        DataDependentSize,
    ],
    Field(
        examples=[
            10,
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

The size/length of this axis can be specified as - fixed integer - reference to another axis with an optional offset (SizeReference) - data dependent size using DataDependentSize (size is only known after model inference)

type pydantic-field ¤

type: Literal['index'] = 'index'

unit property ¤

unit

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

InputTensorDescr pydantic-model ¤

Bases: TensorDescrBase[InputAxis]

Show JSON schema:
{
  "$defs": {
    "BatchAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "batch",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "batch",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "const": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The batch size may be fixed to 1,\notherwise (the default) it may be chosen arbitrarily depending on available memory",
          "title": "Size"
        }
      },
      "required": [
        "type"
      ],
      "title": "model.v0_5.BatchAxis",
      "type": "object"
    },
    "BinarizeAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold values along `axis`",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Threshold",
          "type": "array"
        },
        "axis": {
          "description": "The `threshold` axis",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "threshold",
        "axis"
      ],
      "title": "model.v0_5.BinarizeAlongAxisKwargs",
      "type": "object"
    },
    "BinarizeDescr": {
      "additionalProperties": false,
      "description": "Binarize the tensor with a fixed threshold.\n\nValues above `BinarizeKwargs.threshold`/`BinarizeAlongAxisKwargs.threshold`\nwill be set to one, values below the threshold to zero.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: binarize\n        kwargs:\n          axis: 'channel'\n          threshold: [0.25, 0.5, 0.75]\n    ```\n- in Python:\n    >>> postprocessing = [BinarizeDescr(\n    ...   kwargs=BinarizeAlongAxisKwargs(\n    ...       axis=AxisId('channel'),\n    ...       threshold=[0.25, 0.5, 0.75],\n    ...   )\n    ... )]",
      "properties": {
        "id": {
          "const": "binarize",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/BinarizeKwargs"
            },
            {
              "$ref": "#/$defs/BinarizeAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.BinarizeDescr",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_5.BinarizeKwargs",
      "type": "object"
    },
    "ChannelAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "channel",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "channel",
          "title": "Type",
          "type": "string"
        },
        "channel_names": {
          "items": {
            "minLength": 1,
            "title": "Identifier",
            "type": "string"
          },
          "minItems": 1,
          "title": "Channel Names",
          "type": "array"
        }
      },
      "required": [
        "type",
        "channel_names"
      ],
      "title": "model.v0_5.ChannelAxis",
      "type": "object"
    },
    "ClipDescr": {
      "additionalProperties": false,
      "description": "Set tensor values below min to min and above max to max.\n\nSee `ScaleRangeDescr` for examples.",
      "properties": {
        "id": {
          "const": "clip",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ClipKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ClipDescr",
      "type": "object"
    },
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    },
    "EnsureDtypeDescr": {
      "additionalProperties": false,
      "description": "Cast the tensor data type to `EnsureDtypeKwargs.dtype` (if not matching).\n\nThis can for example be used to ensure the inner neural network model gets a\ndifferent input tensor data type than the fully described bioimage.io model does.\n\nExamples:\n    The described bioimage.io model (incl. preprocessing) accepts any\n    float32-compatible tensor, normalizes it with percentiles and clipping and then\n    casts it to uint8, which is what the neural network in this example expects.\n    - in YAML\n        ```yaml\n        inputs:\n        - data:\n            type: float32  # described bioimage.io model is compatible with any float32 input tensor\n          preprocessing:\n          - id: scale_range\n              kwargs:\n              axes: ['y', 'x']\n              max_percentile: 99.8\n              min_percentile: 5.0\n          - id: clip\n              kwargs:\n              min: 0.0\n              max: 1.0\n          - id: ensure_dtype  # the neural network of the model requires uint8\n              kwargs:\n              dtype: uint8\n        ```\n    - in Python:\n        >>> preprocessing = [\n        ...     ScaleRangeDescr(\n        ...         kwargs=ScaleRangeKwargs(\n        ...           axes= (AxisId('y'), AxisId('x')),\n        ...           max_percentile= 99.8,\n        ...           min_percentile= 5.0,\n        ...         )\n        ...     ),\n        ...     ClipDescr(kwargs=ClipKwargs(min=0.0, max=1.0)),\n        ...     EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=\"uint8\")),\n        ... ]",
      "properties": {
        "id": {
          "const": "ensure_dtype",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/EnsureDtypeKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.EnsureDtypeDescr",
      "type": "object"
    },
    "EnsureDtypeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `EnsureDtypeDescr`",
      "properties": {
        "dtype": {
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "title": "Dtype",
          "type": "string"
        }
      },
      "required": [
        "dtype"
      ],
      "title": "model.v0_5.EnsureDtypeKwargs",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value(s) to normalize with.",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Mean",
          "type": "array"
        },
        "std": {
          "description": "The standard deviation value(s) to normalize with.\nSize must match `mean` values.",
          "items": {
            "minimum": 1e-06,
            "type": "number"
          },
          "minItems": 1,
          "title": "Std",
          "type": "array"
        },
        "axis": {
          "description": "The axis of the mean/std values to normalize each entry along that dimension\nseparately.",
          "examples": [
            "channel",
            "index"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "mean",
        "std",
        "axis"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract a given mean and divide by the standard deviation.\n\nNormalize with fixed, precomputed values for\n`FixedZeroMeanUnitVarianceKwargs.mean` and `FixedZeroMeanUnitVarianceKwargs.std`\nUse `FixedZeroMeanUnitVarianceAlongAxisKwargs` for independent scaling along given\naxes.\n\nExamples:\n1. scalar value for whole tensor\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          mean: 103.5\n          std: 13.7\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceKwargs(mean=103.5, std=13.7)\n    ... )]\n\n2. independently along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          axis: channel\n          mean: [101.5, 102.5, 103.5]\n          std: [11.7, 12.7, 13.7]\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(\n    ...     axis=AxisId(\"channel\"),\n    ...     mean=[101.5, 102.5, 103.5],\n    ...     std=[11.7, 12.7, 13.7],\n    ...   )\n    ... )]",
      "properties": {
        "id": {
          "const": "fixed_zero_mean_unit_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/FixedZeroMeanUnitVarianceKwargs"
            },
            {
              "$ref": "#/$defs/FixedZeroMeanUnitVarianceAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value to normalize with.",
          "title": "Mean",
          "type": "number"
        },
        "std": {
          "description": "The standard deviation value to normalize with.",
          "minimum": 1e-06,
          "title": "Std",
          "type": "number"
        }
      },
      "required": [
        "mean",
        "std"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceKwargs",
      "type": "object"
    },
    "IndexInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "index",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "index",
          "title": "Type",
          "type": "string"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.IndexInputAxis",
      "type": "object"
    },
    "IntervalOrRatioDataDescr": {
      "additionalProperties": false,
      "properties": {
        "type": {
          "default": "float32",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64"
          ],
          "examples": [
            "float32",
            "float64",
            "uint8",
            "uint16"
          ],
          "title": "Type",
          "type": "string"
        },
        "range": {
          "default": [
            null,
            null
          ],
          "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\n`None` corresponds to min/max of what can be expressed by **type**.",
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            },
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            }
          ],
          "title": "Range",
          "type": "array"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            }
          ],
          "default": "arbitrary unit",
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "description": "Scale for data on an interval (or ratio) scale.",
          "title": "Scale",
          "type": "number"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Offset for data on a ratio scale.",
          "title": "Offset"
        }
      },
      "title": "model.v0_5.IntervalOrRatioDataDescr",
      "type": "object"
    },
    "NominalOrOrdinalDataDescr": {
      "additionalProperties": false,
      "properties": {
        "values": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "boolean"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "string"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "description": "A fixed set of nominal or an ascending sequence of ordinal values.\nIn this case `data.type` is required to be an unsigned integer type, e.g. 'uint8'.\nString `values` are interpreted as labels for tensor values 0, ..., N.\nNote: as YAML 1.2 does not natively support a \"set\" datatype,\nnominal values should be given as a sequence (aka list/array) as well.",
          "title": "Values"
        },
        "type": {
          "default": "uint8",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "examples": [
            "float32",
            "uint8",
            "uint16",
            "int64",
            "bool"
          ],
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        }
      },
      "required": [
        "values"
      ],
      "title": "model.v0_5.NominalOrOrdinalDataDescr",
      "type": "object"
    },
    "ParameterizedSize": {
      "additionalProperties": false,
      "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize paramters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize paramter n = 0,1,2,... results in a greater **size**.\n  This allows to adjust the axis size more generically.",
      "properties": {
        "min": {
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "step": {
          "exclusiveMinimum": 0,
          "title": "Step",
          "type": "integer"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_5.ParameterizedSize",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "ScaleLinearAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axis": {
          "description": "The axis of gain and offset values.",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "required": [
        "axis"
      ],
      "title": "model.v0_5.ScaleLinearAlongAxisKwargs",
      "type": "object"
    },
    "ScaleLinearDescr": {
      "additionalProperties": false,
      "description": "Fixed linear scaling.\n\nExamples:\n  1. Scale with scalar gain and offset\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          gain: 2.0\n          offset: 3.0\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(kwargs=ScaleLinearKwargs(gain= 2.0, offset=3.0))\n    ... ]\n\n  2. Independent scaling along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          axis: 'channel'\n          gain: [1.0, 2.0, 3.0]\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(\n    ...         kwargs=ScaleLinearAlongAxisKwargs(\n    ...             axis=AxisId(\"channel\"),\n    ...             gain=[1.0, 2.0, 3.0],\n    ...         )\n    ...     )\n    ... ]",
      "properties": {
        "id": {
          "const": "scale_linear",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/ScaleLinearKwargs"
            },
            {
              "$ref": "#/$defs/ScaleLinearAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ScaleLinearDescr",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "gain": {
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain",
          "type": "number"
        },
        "offset": {
          "default": 0.0,
          "description": "additive term",
          "title": "Offset",
          "type": "number"
        }
      },
      "title": "model.v0_5.ScaleLinearKwargs",
      "type": "object"
    },
    "ScaleRangeDescr": {
      "additionalProperties": false,
      "description": "Scale with percentiles.\n\nExamples:\n1. Scale linearly to map 5th percentile to 0 and 99.8th percentile to 1.0\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...     ScaleRangeDescr(\n    ...         kwargs=ScaleRangeKwargs(\n    ...           axes= (AxisId('y'), AxisId('x')),\n    ...           max_percentile= 99.8,\n    ...           min_percentile= 5.0,\n    ...         )\n    ...     )\n    ... ]\n\n  2. Combine the above scaling with additional clipping to clip values outside the range given by the percentiles.\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n              - id: scale_range\n       - id: clip\n         kwargs:\n          min: 0.0\n          max: 1.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...   ScaleRangeDescr(\n    ...     kwargs=ScaleRangeKwargs(\n    ...       axes= (AxisId('y'), AxisId('x')),\n    ...       max_percentile= 99.8,\n    ...       min_percentile= 5.0,\n    ...     )\n    ...   ),\n    ...   ClipDescr(\n    ...     kwargs=ClipKwargs(\n    ...       min=0.0,\n    ...       max=1.0,\n    ...     )\n    ...   ),\n    ... ]",
      "properties": {
        "id": {
          "const": "scale_range",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleRangeKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.ScaleRangeDescr",
      "type": "object"
    },
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the \"batch\" axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "min_percentile": {
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "exclusiveMaximum": 100,
          "minimum": 0,
          "title": "Min Percentile",
          "type": "number"
        },
        "max_percentile": {
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "exclusiveMinimum": 1,
          "maximum": 100,
          "title": "Max Percentile",
          "type": "number"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "maxLength": 32,
              "minLength": 1,
              "title": "TensorId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor ID to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.",
          "title": "Reference Tensor"
        }
      },
      "title": "model.v0_5.ScaleRangeKwargs",
      "type": "object"
    },
    "SigmoidDescr": {
      "additionalProperties": false,
      "description": "The logistic sigmoid function, a.k.a. expit function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: sigmoid\n    ```\n- in Python:\n    >>> postprocessing = [SigmoidDescr()]",
      "properties": {
        "id": {
          "const": "sigmoid",
          "title": "Id",
          "type": "string"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.SigmoidDescr",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn unisotropic input image of w*h=100*49 pixels depicts a phsical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    },
    "SoftmaxDescr": {
      "additionalProperties": false,
      "description": "The softmax function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: softmax\n        kwargs:\n          axis: channel\n    ```\n- in Python:\n    >>> postprocessing = [SoftmaxDescr(kwargs=SoftmaxKwargs(axis=AxisId(\"channel\")))]",
      "properties": {
        "id": {
          "const": "softmax",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/SoftmaxKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.SoftmaxDescr",
      "type": "object"
    },
    "SoftmaxKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `SoftmaxDescr`",
      "properties": {
        "axis": {
          "default": "channel",
          "description": "The axis to apply the softmax function along.\nNote:\n    Defaults to 'channel' axis\n    (which may not exist, in which case\n    a different axis id has to be specified).",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "title": "model.v0_5.SoftmaxKwargs",
      "type": "object"
    },
    "SpaceInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceInputAxis",
      "type": "object"
    },
    "TimeInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeInputAxis",
      "type": "object"
    },
    "ZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract mean and divide by variance.\n\nExamples:\n    Subtract tensor mean and variance\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: zero_mean_unit_variance\n    ```\n    - in Python\n    >>> preprocessing = [ZeroMeanUnitVarianceDescr()]",
      "properties": {
        "id": {
          "const": "zero_mean_unit_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.ZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize each sample independently leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "title": "model.v0_5.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "input",
      "description": "Input tensor id.\nNo duplicates are allowed across all inputs and outputs.",
      "maxLength": 32,
      "minLength": 1,
      "title": "TensorId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "free text description",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "axes": {
      "description": "tensor axes",
      "items": {
        "discriminator": {
          "mapping": {
            "batch": "#/$defs/BatchAxis",
            "channel": "#/$defs/ChannelAxis",
            "index": "#/$defs/IndexInputAxis",
            "space": "#/$defs/SpaceInputAxis",
            "time": "#/$defs/TimeInputAxis"
          },
          "propertyName": "type"
        },
        "oneOf": [
          {
            "$ref": "#/$defs/BatchAxis"
          },
          {
            "$ref": "#/$defs/ChannelAxis"
          },
          {
            "$ref": "#/$defs/IndexInputAxis"
          },
          {
            "$ref": "#/$defs/TimeInputAxis"
          },
          {
            "$ref": "#/$defs/SpaceInputAxis"
          }
        ]
      },
      "minItems": 1,
      "title": "Axes",
      "type": "array"
    },
    "test_tensor": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "An example tensor to use for testing.\nUsing the model with the test input tensors is expected to yield the test output tensors.\nEach test tensor has be a an ndarray in the\n[numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).\nThe file extension must be '.npy'."
    },
    "sample_tensor": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A sample tensor to illustrate a possible input/output for the model,\nThe sample image primarily serves to inform a human user about an example use case\nand is typically stored as .hdf5, .png or .tiff.\nIt has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)\n(numpy's `.npy` format is not supported).\nThe image dimensionality has to match the number of axes specified in this tensor description."
    },
    "data": {
      "anyOf": [
        {
          "$ref": "#/$defs/NominalOrOrdinalDataDescr"
        },
        {
          "$ref": "#/$defs/IntervalOrRatioDataDescr"
        },
        {
          "items": {
            "anyOf": [
              {
                "$ref": "#/$defs/NominalOrOrdinalDataDescr"
              },
              {
                "$ref": "#/$defs/IntervalOrRatioDataDescr"
              }
            ]
          },
          "minItems": 1,
          "type": "array"
        }
      ],
      "default": {
        "type": "float32",
        "range": [
          null,
          null
        ],
        "unit": "arbitrary unit",
        "scale": 1.0,
        "offset": null
      },
      "description": "Description of the tensor's data values, optionally per channel.\nIf specified per channel, the data `type` needs to match across channels.",
      "title": "Data"
    },
    "optional": {
      "default": false,
      "description": "indicates that this tensor may be `None`",
      "title": "Optional",
      "type": "boolean"
    },
    "preprocessing": {
      "description": "Description of how this input should be preprocessed.\n\nnotes:\n- If preprocessing does not start with an 'ensure_dtype' entry, it is added\n  to ensure an input tensor's data type matches the input tensor's data description.\n- If preprocessing does not end with an 'ensure_dtype' or 'binarize' entry, an\n  'ensure_dtype' step is added to ensure preprocessing steps are not unintentionally\n  changing the data type.",
      "items": {
        "discriminator": {
          "mapping": {
            "binarize": "#/$defs/BinarizeDescr",
            "clip": "#/$defs/ClipDescr",
            "ensure_dtype": "#/$defs/EnsureDtypeDescr",
            "fixed_zero_mean_unit_variance": "#/$defs/FixedZeroMeanUnitVarianceDescr",
            "scale_linear": "#/$defs/ScaleLinearDescr",
            "scale_range": "#/$defs/ScaleRangeDescr",
            "sigmoid": "#/$defs/SigmoidDescr",
            "softmax": "#/$defs/SoftmaxDescr",
            "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
          },
          "propertyName": "id"
        },
        "oneOf": [
          {
            "$ref": "#/$defs/BinarizeDescr"
          },
          {
            "$ref": "#/$defs/ClipDescr"
          },
          {
            "$ref": "#/$defs/EnsureDtypeDescr"
          },
          {
            "$ref": "#/$defs/FixedZeroMeanUnitVarianceDescr"
          },
          {
            "$ref": "#/$defs/ScaleLinearDescr"
          },
          {
            "$ref": "#/$defs/ScaleRangeDescr"
          },
          {
            "$ref": "#/$defs/SigmoidDescr"
          },
          {
            "$ref": "#/$defs/SoftmaxDescr"
          },
          {
            "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
          }
        ]
      },
      "title": "Preprocessing",
      "type": "array"
    }
  },
  "required": [
    "axes"
  ],
  "title": "model.v0_5.InputTensorDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate_axes (field: axes)
  • _validate_sample_tensor
  • _check_data_type_across_channels (field: data)
  • _check_data_matches_channelaxis
  • _validate_preprocessing_kwargs

axes pydantic-field ¤

axes: NotEmpty[Sequence[IO_AxisT]]

tensor axes

data pydantic-field ¤

data: Union[
    TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]
]

Description of the tensor's data values, optionally per channel. If specified per channel, the data type needs to match across channels.

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

free text description

dtype property ¤

dtype: Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
    "bool",
]

dtype as specified under data.type or data[i].type

id pydantic-field ¤

Input tensor id. No duplicates are allowed across all inputs and outputs.

optional pydantic-field ¤

optional: bool = False

indicates that this tensor may be None

preprocessing pydantic-field ¤

preprocessing: List[PreprocessingDescr]

Description of how this input should be preprocessed.

notes: - If preprocessing does not start with an 'ensure_dtype' entry, it is added to ensure an input tensor's data type matches the input tensor's data description. - If preprocessing does not end with an 'ensure_dtype' or 'binarize' entry, an 'ensure_dtype' step is added to ensure preprocessing steps are not unintentionally changing the data type.

sample_tensor pydantic-field ¤

sample_tensor: FAIR[Optional[FileDescr_]] = None

A sample tensor to illustrate a possible input/output for the model, The sample image primarily serves to inform a human user about an example use case and is typically stored as .hdf5, .png or .tiff. It has to be readable by the imageio library (numpy's .npy format is not supported). The image dimensionality has to match the number of axes specified in this tensor description.

shape property ¤

shape

test_tensor pydantic-field ¤

test_tensor: FAIR[Optional[FileDescr_]] = None

An example tensor to use for testing. Using the model with the test input tensors is expected to yield the test output tensors. Each test tensor has to be an ndarray in the numpy.lib file format. The file extension must be '.npy'.

get_axis_sizes_for_array ¤

get_axis_sizes_for_array(
    array: NDArray[Any],
) -> Dict[AxisId, int]
Source code in src/bioimageio/spec/model/v0_5.py
1684
1685
1686
1687
1688
1689
1690
def get_axis_sizes_for_array(self, array: NDArray[Any]) -> Dict[AxisId, int]:
    """Return a mapping from each axis id of this tensor description to the
    extent of the corresponding dimension of `array`.

    Raises:
        ValueError: if the number of dimensions of `array` does not match
            the number of axes in this description.
    """
    n_dims = len(array.shape)
    n_axes = len(self.axes)
    if n_dims != n_axes:
        raise ValueError(
            f"Dimension mismatch: array shape {array.shape} (#{n_dims})"
            + f" incompatible with {n_axes} axes."
        )
    sizes = {}
    for axis, extent in zip(self.axes, array.shape):
        sizes[axis.id] = extent
    return sizes

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    # Hide this frame from pytest tracebacks so failures point at the caller.
    __tracebackhide__ = True

    # Normalize `context`: fall back to the ambient validation context when
    # none is given, and coerce a plain mapping into a ValidationContext.
    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    # Sanity check (stripped under `python -O`): a mapping passed as `obj`
    # must be kwargs-shaped; full input validation happens in the super call.
    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    # NOTE(review): `by_alias` and `by_name` are accepted for signature
    # compatibility but are not forwarded to `super().model_validate` below
    # — confirm this is intentional.
    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

IntervalOrRatioDataDescr pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "type": {
      "default": "float32",
      "enum": [
        "float32",
        "float64",
        "uint8",
        "int8",
        "uint16",
        "int16",
        "uint32",
        "int32",
        "uint64",
        "int64"
      ],
      "examples": [
        "float32",
        "float64",
        "uint8",
        "uint16"
      ],
      "title": "Type",
      "type": "string"
    },
    "range": {
      "default": [
        null,
        null
      ],
      "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\n`None` corresponds to min/max of what can be expressed by **type**.",
      "maxItems": 2,
      "minItems": 2,
      "prefixItems": [
        {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ]
        },
        {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ]
        }
      ],
      "title": "Range",
      "type": "array"
    },
    "unit": {
      "anyOf": [
        {
          "const": "arbitrary unit",
          "type": "string"
        },
        {
          "description": "An SI unit",
          "minLength": 1,
          "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
          "title": "SiUnit",
          "type": "string"
        }
      ],
      "default": "arbitrary unit",
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "description": "Scale for data on an interval (or ratio) scale.",
      "title": "Scale",
      "type": "number"
    },
    "offset": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Offset for data on a ratio scale.",
      "title": "Offset"
    }
  },
  "title": "model.v0_5.IntervalOrRatioDataDescr",
  "type": "object"
}

Fields:

Validators:

  • _replace_inf

offset pydantic-field ¤

offset: Optional[float] = None

Offset for data on a ratio scale.

range pydantic-field ¤

range: Tuple[Optional[float], Optional[float]] = (
    None,
    None,
)

Tuple (minimum, maximum) specifying the allowed range of the data in this tensor. None corresponds to min/max of what can be expressed by type.

scale pydantic-field ¤

scale: float = 1.0

Scale for data on an interval (or ratio) scale.

type pydantic-field ¤

type: Annotated[
    IntervalOrRatioDType,
    Field(
        examples=["float32", "float64", "uint8", "uint16"]
    ),
] = "float32"

unit pydantic-field ¤

unit: Union[Literal["arbitrary unit"], SiUnit] = (
    "arbitrary unit"
)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

KerasHdf5WeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Source of the weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    },
    "tensorflow_version": {
      "$ref": "#/$defs/Version",
      "description": "TensorFlow version used to create these weights."
    }
  },
  "required": [
    "source",
    "tensorflow_version"
  ],
  "title": "model.v0_5.KerasHdf5WeightsDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

Source of the weights file.

tensorflow_version pydantic-field ¤

tensorflow_version: Version

TensorFlow version used to create these weights.

type class-attribute ¤

type: WeightsFormat = 'keras_hdf5'

weights_format_name class-attribute ¤

weights_format_name: str = 'Keras HDF5'

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

LinkedModel pydantic-model ¤

Bases: LinkedResourceBase

Reference to a bioimage.io model.

Show JSON schema:
{
  "$defs": {
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "description": "Reference to a bioimage.io model.",
  "properties": {
    "version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The version of the linked resource following SemVer 2.0."
    },
    "id": {
      "description": "A valid model `id` from the bioimage.io collection.",
      "minLength": 1,
      "title": "ModelId",
      "type": "string"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_5.LinkedModel",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: ModelId

A valid model id from the bioimage.io collection.

version pydantic-field ¤

version: Optional[Version] = None

The version of the linked resource following SemVer 2.0.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ModelDescr pydantic-model ¤

Bases: GenericModelDescrBase

Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. These fields are typically stored in a YAML file which we call a model resource description file (model RDF).

Show JSON schema:
{
  "$defs": {
    "ArchitectureFromFileDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Architecture source file",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "callable": {
          "description": "Identifier of the callable that returns a torch.nn.Module instance.",
          "examples": [
            "MyNetworkClass",
            "get_my_model"
          ],
          "minLength": 1,
          "title": "Identifier",
          "type": "string"
        },
        "kwargs": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "key word arguments for the `callable`",
          "title": "Kwargs",
          "type": "object"
        }
      },
      "required": [
        "source",
        "callable"
      ],
      "title": "model.v0_5.ArchitectureFromFileDescr",
      "type": "object"
    },
    "ArchitectureFromLibraryDescr": {
      "additionalProperties": false,
      "properties": {
        "callable": {
          "description": "Identifier of the callable that returns a torch.nn.Module instance.",
          "examples": [
            "MyNetworkClass",
            "get_my_model"
          ],
          "minLength": 1,
          "title": "Identifier",
          "type": "string"
        },
        "kwargs": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "key word arguments for the `callable`",
          "title": "Kwargs",
          "type": "object"
        },
        "import_from": {
          "description": "Where to import the callable from, i.e. `from <import_from> import <callable>`",
          "title": "Import From",
          "type": "string"
        }
      },
      "required": [
        "callable",
        "import_from"
      ],
      "title": "model.v0_5.ArchitectureFromLibraryDescr",
      "type": "object"
    },
    "AttachmentsDescr": {
      "additionalProperties": true,
      "properties": {
        "files": {
          "description": "File attachments",
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Files",
          "type": "array"
        }
      },
      "title": "generic.v0_2.AttachmentsDescr",
      "type": "object"
    },
    "BadgeDescr": {
      "additionalProperties": false,
      "description": "A custom badge",
      "properties": {
        "label": {
          "description": "badge label to display on hover",
          "examples": [
            "Open in Colab"
          ],
          "title": "Label",
          "type": "string"
        },
        "icon": {
          "anyOf": [
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "badge icon (included in bioimage.io package if not a URL)",
          "examples": [
            "https://colab.research.google.com/assets/colab-badge.svg"
          ],
          "title": "Icon"
        },
        "url": {
          "description": "target URL",
          "examples": [
            "https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb"
          ],
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        }
      },
      "required": [
        "label",
        "url"
      ],
      "title": "generic.v0_2.BadgeDescr",
      "type": "object"
    },
    "BatchAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "batch",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "batch",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "const": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The batch size may be fixed to 1,\notherwise (the default) it may be chosen arbitrarily depending on available memory",
          "title": "Size"
        }
      },
      "required": [
        "type"
      ],
      "title": "model.v0_5.BatchAxis",
      "type": "object"
    },
    "BinarizeAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold values along `axis`",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Threshold",
          "type": "array"
        },
        "axis": {
          "description": "The `threshold` axis",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "threshold",
        "axis"
      ],
      "title": "model.v0_5.BinarizeAlongAxisKwargs",
      "type": "object"
    },
    "BinarizeDescr": {
      "additionalProperties": false,
      "description": "Binarize the tensor with a fixed threshold.\n\nValues above `BinarizeKwargs.threshold`/`BinarizeAlongAxisKwargs.threshold`\nwill be set to one, values below the threshold to zero.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: binarize\n        kwargs:\n          axis: 'channel'\n          threshold: [0.25, 0.5, 0.75]\n    ```\n- in Python:\n    >>> postprocessing = [BinarizeDescr(\n    ...   kwargs=BinarizeAlongAxisKwargs(\n    ...       axis=AxisId('channel'),\n    ...       threshold=[0.25, 0.5, 0.75],\n    ...   )\n    ... )]",
      "properties": {
        "id": {
          "const": "binarize",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/BinarizeKwargs"
            },
            {
              "$ref": "#/$defs/BinarizeAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.BinarizeDescr",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_5.BinarizeKwargs",
      "type": "object"
    },
    "ChannelAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "channel",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "channel",
          "title": "Type",
          "type": "string"
        },
        "channel_names": {
          "items": {
            "minLength": 1,
            "title": "Identifier",
            "type": "string"
          },
          "minItems": 1,
          "title": "Channel Names",
          "type": "array"
        }
      },
      "required": [
        "type",
        "channel_names"
      ],
      "title": "model.v0_5.ChannelAxis",
      "type": "object"
    },
    "ClipDescr": {
      "additionalProperties": false,
      "description": "Set tensor values below min to min and above max to max.\n\nSee `ScaleRangeDescr` for examples.",
      "properties": {
        "id": {
          "const": "clip",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ClipKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ClipDescr",
      "type": "object"
    },
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    },
    "DataDependentSize": {
      "additionalProperties": false,
      "properties": {
        "min": {
          "default": 1,
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "max": {
          "anyOf": [
            {
              "exclusiveMinimum": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Max"
        }
      },
      "title": "model.v0_5.DataDependentSize",
      "type": "object"
    },
    "Datetime": {
      "description": "Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format\nwith a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).",
      "format": "date-time",
      "title": "Datetime",
      "type": "string"
    },
    "EnsureDtypeDescr": {
      "additionalProperties": false,
      "description": "Cast the tensor data type to `EnsureDtypeKwargs.dtype` (if not matching).\n\nThis can for example be used to ensure the inner neural network model gets a\ndifferent input tensor data type than the fully described bioimage.io model does.\n\nExamples:\n    The described bioimage.io model (incl. preprocessing) accepts any\n    float32-compatible tensor, normalizes it with percentiles and clipping and then\n    casts it to uint8, which is what the neural network in this example expects.\n    - in YAML\n        ```yaml\n        inputs:\n        - data:\n            type: float32  # described bioimage.io model is compatible with any float32 input tensor\n          preprocessing:\n          - id: scale_range\n              kwargs:\n              axes: ['y', 'x']\n              max_percentile: 99.8\n              min_percentile: 5.0\n          - id: clip\n              kwargs:\n              min: 0.0\n              max: 1.0\n          - id: ensure_dtype  # the neural network of the model requires uint8\n              kwargs:\n              dtype: uint8\n        ```\n    - in Python:\n        >>> preprocessing = [\n        ...     ScaleRangeDescr(\n        ...         kwargs=ScaleRangeKwargs(\n        ...           axes= (AxisId('y'), AxisId('x')),\n        ...           max_percentile= 99.8,\n        ...           min_percentile= 5.0,\n        ...         )\n        ...     ),\n        ...     ClipDescr(kwargs=ClipKwargs(min=0.0, max=1.0)),\n        ...     EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=\"uint8\")),\n        ... ]",
      "properties": {
        "id": {
          "const": "ensure_dtype",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/EnsureDtypeKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.EnsureDtypeDescr",
      "type": "object"
    },
    "EnsureDtypeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `EnsureDtypeDescr`",
      "properties": {
        "dtype": {
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "title": "Dtype",
          "type": "string"
        }
      },
      "required": [
        "dtype"
      ],
      "title": "model.v0_5.EnsureDtypeKwargs",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value(s) to normalize with.",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Mean",
          "type": "array"
        },
        "std": {
          "description": "The standard deviation value(s) to normalize with.\nSize must match `mean` values.",
          "items": {
            "minimum": 1e-06,
            "type": "number"
          },
          "minItems": 1,
          "title": "Std",
          "type": "array"
        },
        "axis": {
          "description": "The axis of the mean/std values to normalize each entry along that dimension\nseparately.",
          "examples": [
            "channel",
            "index"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "mean",
        "std",
        "axis"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract a given mean and divide by the standard deviation.\n\nNormalize with fixed, precomputed values for\n`FixedZeroMeanUnitVarianceKwargs.mean` and `FixedZeroMeanUnitVarianceKwargs.std`\nUse `FixedZeroMeanUnitVarianceAlongAxisKwargs` for independent scaling along given\naxes.\n\nExamples:\n1. scalar value for whole tensor\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          mean: 103.5\n          std: 13.7\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceKwargs(mean=103.5, std=13.7)\n    ... )]\n\n2. independently along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          axis: channel\n          mean: [101.5, 102.5, 103.5]\n          std: [11.7, 12.7, 13.7]\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(\n    ...     axis=AxisId(\"channel\"),\n    ...     mean=[101.5, 102.5, 103.5],\n    ...     std=[11.7, 12.7, 13.7],\n    ...   )\n    ... )]",
      "properties": {
        "id": {
          "const": "fixed_zero_mean_unit_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/FixedZeroMeanUnitVarianceKwargs"
            },
            {
              "$ref": "#/$defs/FixedZeroMeanUnitVarianceAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value to normalize with.",
          "title": "Mean",
          "type": "number"
        },
        "std": {
          "description": "The standard deviation value to normalize with.",
          "minimum": 1e-06,
          "title": "Std",
          "type": "number"
        }
      },
      "required": [
        "mean",
        "std"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceKwargs",
      "type": "object"
    },
    "IndexInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "index",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "index",
          "title": "Type",
          "type": "string"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.IndexInputAxis",
      "type": "object"
    },
    "IndexOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "index",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "index",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            },
            {
              "$ref": "#/$defs/DataDependentSize"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (`SizeReference`)\n- data dependent size using `DataDependentSize` (size is only known after model inference)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        }
      },
      "required": [
        "type",
        "size"
      ],
      "title": "model.v0_5.IndexOutputAxis",
      "type": "object"
    },
    "InputTensorDescr": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "input",
          "description": "Input tensor id.\nNo duplicates are allowed across all inputs and outputs.",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "free text description",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "axes": {
          "description": "tensor axes",
          "items": {
            "discriminator": {
              "mapping": {
                "batch": "#/$defs/BatchAxis",
                "channel": "#/$defs/ChannelAxis",
                "index": "#/$defs/IndexInputAxis",
                "space": "#/$defs/SpaceInputAxis",
                "time": "#/$defs/TimeInputAxis"
              },
              "propertyName": "type"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BatchAxis"
              },
              {
                "$ref": "#/$defs/ChannelAxis"
              },
              {
                "$ref": "#/$defs/IndexInputAxis"
              },
              {
                "$ref": "#/$defs/TimeInputAxis"
              },
              {
                "$ref": "#/$defs/SpaceInputAxis"
              }
            ]
          },
          "minItems": 1,
          "title": "Axes",
          "type": "array"
        },
        "test_tensor": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An example tensor to use for testing.\nUsing the model with the test input tensors is expected to yield the test output tensors.\nEach test tensor has be a an ndarray in the\n[numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).\nThe file extension must be '.npy'."
        },
        "sample_tensor": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A sample tensor to illustrate a possible input/output for the model,\nThe sample image primarily serves to inform a human user about an example use case\nand is typically stored as .hdf5, .png or .tiff.\nIt has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)\n(numpy's `.npy` format is not supported).\nThe image dimensionality has to match the number of axes specified in this tensor description."
        },
        "data": {
          "anyOf": [
            {
              "$ref": "#/$defs/NominalOrOrdinalDataDescr"
            },
            {
              "$ref": "#/$defs/IntervalOrRatioDataDescr"
            },
            {
              "items": {
                "anyOf": [
                  {
                    "$ref": "#/$defs/NominalOrOrdinalDataDescr"
                  },
                  {
                    "$ref": "#/$defs/IntervalOrRatioDataDescr"
                  }
                ]
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": {
            "type": "float32",
            "range": [
              null,
              null
            ],
            "unit": "arbitrary unit",
            "scale": 1.0,
            "offset": null
          },
          "description": "Description of the tensor's data values, optionally per channel.\nIf specified per channel, the data `type` needs to match across channels.",
          "title": "Data"
        },
        "optional": {
          "default": false,
          "description": "indicates that this tensor may be `None`",
          "title": "Optional",
          "type": "boolean"
        },
        "preprocessing": {
          "description": "Description of how this input should be preprocessed.\n\nnotes:\n- If preprocessing does not start with an 'ensure_dtype' entry, it is added\n  to ensure an input tensor's data type matches the input tensor's data description.\n- If preprocessing does not end with an 'ensure_dtype' or 'binarize' entry, an\n  'ensure_dtype' step is added to ensure preprocessing steps are not unintentionally\n  changing the data type.",
          "items": {
            "discriminator": {
              "mapping": {
                "binarize": "#/$defs/BinarizeDescr",
                "clip": "#/$defs/ClipDescr",
                "ensure_dtype": "#/$defs/EnsureDtypeDescr",
                "fixed_zero_mean_unit_variance": "#/$defs/FixedZeroMeanUnitVarianceDescr",
                "scale_linear": "#/$defs/ScaleLinearDescr",
                "scale_range": "#/$defs/ScaleRangeDescr",
                "sigmoid": "#/$defs/SigmoidDescr",
                "softmax": "#/$defs/SoftmaxDescr",
                "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
              },
              "propertyName": "id"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BinarizeDescr"
              },
              {
                "$ref": "#/$defs/ClipDescr"
              },
              {
                "$ref": "#/$defs/EnsureDtypeDescr"
              },
              {
                "$ref": "#/$defs/FixedZeroMeanUnitVarianceDescr"
              },
              {
                "$ref": "#/$defs/ScaleLinearDescr"
              },
              {
                "$ref": "#/$defs/ScaleRangeDescr"
              },
              {
                "$ref": "#/$defs/SigmoidDescr"
              },
              {
                "$ref": "#/$defs/SoftmaxDescr"
              },
              {
                "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
              }
            ]
          },
          "title": "Preprocessing",
          "type": "array"
        }
      },
      "required": [
        "axes"
      ],
      "title": "model.v0_5.InputTensorDescr",
      "type": "object"
    },
    "IntervalOrRatioDataDescr": {
      "additionalProperties": false,
      "properties": {
        "type": {
          "default": "float32",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64"
          ],
          "examples": [
            "float32",
            "float64",
            "uint8",
            "uint16"
          ],
          "title": "Type",
          "type": "string"
        },
        "range": {
          "default": [
            null,
            null
          ],
          "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\n`None` corresponds to min/max of what can be expressed by **type**.",
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            },
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            }
          ],
          "title": "Range",
          "type": "array"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            }
          ],
          "default": "arbitrary unit",
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "description": "Scale for data on an interval (or ratio) scale.",
          "title": "Scale",
          "type": "number"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Offset for data on a ratio scale.",
          "title": "Offset"
        }
      },
      "title": "model.v0_5.IntervalOrRatioDataDescr",
      "type": "object"
    },
    "KerasHdf5WeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "tensorflow_version": {
          "$ref": "#/$defs/Version",
          "description": "TensorFlow version used to create these weights."
        }
      },
      "required": [
        "source",
        "tensorflow_version"
      ],
      "title": "model.v0_5.KerasHdf5WeightsDescr",
      "type": "object"
    },
    "LinkedDataset": {
      "additionalProperties": false,
      "description": "Reference to a bioimage.io dataset.",
      "properties": {
        "version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The version of the linked resource following SemVer 2.0."
        },
        "id": {
          "description": "A valid dataset `id` from the bioimage.io collection.",
          "minLength": 1,
          "title": "DatasetId",
          "type": "string"
        }
      },
      "required": [
        "id"
      ],
      "title": "dataset.v0_3.LinkedDataset",
      "type": "object"
    },
    "LinkedModel": {
      "additionalProperties": false,
      "description": "Reference to a bioimage.io model.",
      "properties": {
        "version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The version of the linked resource following SemVer 2.0."
        },
        "id": {
          "description": "A valid model `id` from the bioimage.io collection.",
          "minLength": 1,
          "title": "ModelId",
          "type": "string"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.LinkedModel",
      "type": "object"
    },
    "NominalOrOrdinalDataDescr": {
      "additionalProperties": false,
      "properties": {
        "values": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "boolean"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "string"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "description": "A fixed set of nominal or an ascending sequence of ordinal values.\nIn this case `data.type` is required to be an unsigend integer type, e.g. 'uint8'.\nString `values` are interpreted as labels for tensor values 0, ..., N.\nNote: as YAML 1.2 does not natively support a \"set\" datatype,\nnominal values should be given as a sequence (aka list/array) as well.",
          "title": "Values"
        },
        "type": {
          "default": "uint8",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "examples": [
            "float32",
            "uint8",
            "uint16",
            "int64",
            "bool"
          ],
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        }
      },
      "required": [
        "values"
      ],
      "title": "model.v0_5.NominalOrOrdinalDataDescr",
      "type": "object"
    },
    "OnnxWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "opset_version": {
          "description": "ONNX opset version",
          "minimum": 7,
          "title": "Opset Version",
          "type": "integer"
        },
        "external_data": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr",
              "examples": [
                {
                  "source": "weights.onnx.data"
                }
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Source of the external ONNX data file holding the weights.\n(If present **source** holds the ONNX architecture without weights)."
        }
      },
      "required": [
        "source",
        "opset_version"
      ],
      "title": "model.v0_5.OnnxWeightsDescr",
      "type": "object"
    },
    "OutputTensorDescr": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "output",
          "description": "Output tensor id.\nNo duplicates are allowed across all inputs and outputs.",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "free text description",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "axes": {
          "description": "tensor axes",
          "items": {
            "discriminator": {
              "mapping": {
                "batch": "#/$defs/BatchAxis",
                "channel": "#/$defs/ChannelAxis",
                "index": "#/$defs/IndexOutputAxis",
                "space": {
                  "oneOf": [
                    {
                      "$ref": "#/$defs/SpaceOutputAxis"
                    },
                    {
                      "$ref": "#/$defs/SpaceOutputAxisWithHalo"
                    }
                  ]
                },
                "time": {
                  "oneOf": [
                    {
                      "$ref": "#/$defs/TimeOutputAxis"
                    },
                    {
                      "$ref": "#/$defs/TimeOutputAxisWithHalo"
                    }
                  ]
                }
              },
              "propertyName": "type"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BatchAxis"
              },
              {
                "$ref": "#/$defs/ChannelAxis"
              },
              {
                "$ref": "#/$defs/IndexOutputAxis"
              },
              {
                "oneOf": [
                  {
                    "$ref": "#/$defs/TimeOutputAxis"
                  },
                  {
                    "$ref": "#/$defs/TimeOutputAxisWithHalo"
                  }
                ]
              },
              {
                "oneOf": [
                  {
                    "$ref": "#/$defs/SpaceOutputAxis"
                  },
                  {
                    "$ref": "#/$defs/SpaceOutputAxisWithHalo"
                  }
                ]
              }
            ]
          },
          "minItems": 1,
          "title": "Axes",
          "type": "array"
        },
        "test_tensor": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An example tensor to use for testing.\nUsing the model with the test input tensors is expected to yield the test output tensors.\nEach test tensor has be a an ndarray in the\n[numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).\nThe file extension must be '.npy'."
        },
        "sample_tensor": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A sample tensor to illustrate a possible input/output for the model,\nThe sample image primarily serves to inform a human user about an example use case\nand is typically stored as .hdf5, .png or .tiff.\nIt has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)\n(numpy's `.npy` format is not supported).\nThe image dimensionality has to match the number of axes specified in this tensor description."
        },
        "data": {
          "anyOf": [
            {
              "$ref": "#/$defs/NominalOrOrdinalDataDescr"
            },
            {
              "$ref": "#/$defs/IntervalOrRatioDataDescr"
            },
            {
              "items": {
                "anyOf": [
                  {
                    "$ref": "#/$defs/NominalOrOrdinalDataDescr"
                  },
                  {
                    "$ref": "#/$defs/IntervalOrRatioDataDescr"
                  }
                ]
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": {
            "type": "float32",
            "range": [
              null,
              null
            ],
            "unit": "arbitrary unit",
            "scale": 1.0,
            "offset": null
          },
          "description": "Description of the tensor's data values, optionally per channel.\nIf specified per channel, the data `type` needs to match across channels.",
          "title": "Data"
        },
        "postprocessing": {
          "description": "Description of how this output should be postprocessed.\n\nnote: `postprocessing` always ends with an 'ensure_dtype' operation.\n      If not given this is added to cast to this tensor's `data.type`.",
          "items": {
            "discriminator": {
              "mapping": {
                "binarize": "#/$defs/BinarizeDescr",
                "clip": "#/$defs/ClipDescr",
                "ensure_dtype": "#/$defs/EnsureDtypeDescr",
                "fixed_zero_mean_unit_variance": "#/$defs/FixedZeroMeanUnitVarianceDescr",
                "scale_linear": "#/$defs/ScaleLinearDescr",
                "scale_mean_variance": "#/$defs/ScaleMeanVarianceDescr",
                "scale_range": "#/$defs/ScaleRangeDescr",
                "sigmoid": "#/$defs/SigmoidDescr",
                "softmax": "#/$defs/SoftmaxDescr",
                "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
              },
              "propertyName": "id"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BinarizeDescr"
              },
              {
                "$ref": "#/$defs/ClipDescr"
              },
              {
                "$ref": "#/$defs/EnsureDtypeDescr"
              },
              {
                "$ref": "#/$defs/FixedZeroMeanUnitVarianceDescr"
              },
              {
                "$ref": "#/$defs/ScaleLinearDescr"
              },
              {
                "$ref": "#/$defs/ScaleMeanVarianceDescr"
              },
              {
                "$ref": "#/$defs/ScaleRangeDescr"
              },
              {
                "$ref": "#/$defs/SigmoidDescr"
              },
              {
                "$ref": "#/$defs/SoftmaxDescr"
              },
              {
                "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
              }
            ]
          },
          "title": "Postprocessing",
          "type": "array"
        }
      },
      "required": [
        "axes"
      ],
      "title": "model.v0_5.OutputTensorDescr",
      "type": "object"
    },
    "ParameterizedSize": {
      "additionalProperties": false,
      "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize paramters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize paramter n = 0,1,2,... results in a greater **size**.\n  This allows to adjust the axis size more generically.",
      "properties": {
        "min": {
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "step": {
          "exclusiveMinimum": 0,
          "title": "Step",
          "type": "integer"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_5.ParameterizedSize",
      "type": "object"
    },
    "PytorchStateDictWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "architecture": {
          "anyOf": [
            {
              "$ref": "#/$defs/ArchitectureFromFileDescr"
            },
            {
              "$ref": "#/$defs/ArchitectureFromLibraryDescr"
            }
          ],
          "title": "Architecture"
        },
        "pytorch_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the PyTorch library used.\nIf `architecture.depencencies` is specified it has to include pytorch and any version pinning has to be compatible."
        },
        "dependencies": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr",
              "examples": [
                {
                  "source": "environment.yaml"
                }
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Custom depencies beyond pytorch described in a Conda environment file.\nAllows to specify custom dependencies, see conda docs:\n- [Exporting an environment file across platforms](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#exporting-an-environment-file-across-platforms)\n- [Creating an environment file manually](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-file-manually)\n\nThe conda environment file should include pytorch and any version pinning has to be compatible with\n**pytorch_version**."
        }
      },
      "required": [
        "source",
        "architecture",
        "pytorch_version"
      ],
      "title": "model.v0_5.PytorchStateDictWeightsDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "ReproducibilityTolerance": {
      "additionalProperties": true,
      "description": "Describes what small numerical differences -- if any -- may be tolerated\nin the generated output when executing in different environments.\n\nA tensor element *output* is considered mismatched to the **test_tensor** if\nabs(*output* - **test_tensor**) > **absolute_tolerance** + **relative_tolerance** * abs(**test_tensor**).\n(Internally we call [numpy.testing.assert_allclose](https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_allclose.html).)\n\nMotivation:\n    For testing we can request the respective deep learning frameworks to be as\n    reproducible as possible by setting seeds and chosing deterministic algorithms,\n    but differences in operating systems, available hardware and installed drivers\n    may still lead to numerical differences.",
      "properties": {
        "relative_tolerance": {
          "default": 0.001,
          "description": "Maximum relative tolerance of reproduced test tensor.",
          "maximum": 0.01,
          "minimum": 0,
          "title": "Relative Tolerance",
          "type": "number"
        },
        "absolute_tolerance": {
          "default": 0.0001,
          "description": "Maximum absolute tolerance of reproduced test tensor.",
          "minimum": 0,
          "title": "Absolute Tolerance",
          "type": "number"
        },
        "mismatched_elements_per_million": {
          "default": 100,
          "description": "Maximum number of mismatched elements/pixels per million to tolerate.",
          "maximum": 1000,
          "minimum": 0,
          "title": "Mismatched Elements Per Million",
          "type": "integer"
        },
        "output_ids": {
          "default": [],
          "description": "Limits the output tensor IDs these reproducibility details apply to.",
          "items": {
            "maxLength": 32,
            "minLength": 1,
            "title": "TensorId",
            "type": "string"
          },
          "title": "Output Ids",
          "type": "array"
        },
        "weights_formats": {
          "default": [],
          "description": "Limits the weights formats these details apply to.",
          "items": {
            "enum": [
              "keras_hdf5",
              "onnx",
              "pytorch_state_dict",
              "tensorflow_js",
              "tensorflow_saved_model_bundle",
              "torchscript"
            ],
            "type": "string"
          },
          "title": "Weights Formats",
          "type": "array"
        }
      },
      "title": "model.v0_5.ReproducibilityTolerance",
      "type": "object"
    },
    "RunMode": {
      "additionalProperties": false,
      "properties": {
        "name": {
          "anyOf": [
            {
              "const": "deepimagej",
              "type": "string"
            },
            {
              "type": "string"
            }
          ],
          "description": "Run mode name",
          "title": "Name"
        },
        "kwargs": {
          "additionalProperties": true,
          "description": "Run mode specific key word arguments",
          "title": "Kwargs",
          "type": "object"
        }
      },
      "required": [
        "name"
      ],
      "title": "model.v0_4.RunMode",
      "type": "object"
    },
    "ScaleLinearAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axis": {
          "description": "The axis of gain and offset values.",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "required": [
        "axis"
      ],
      "title": "model.v0_5.ScaleLinearAlongAxisKwargs",
      "type": "object"
    },
    "ScaleLinearDescr": {
      "additionalProperties": false,
      "description": "Fixed linear scaling.\n\nExamples:\n  1. Scale with scalar gain and offset\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          gain: 2.0\n          offset: 3.0\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(kwargs=ScaleLinearKwargs(gain= 2.0, offset=3.0))\n    ... ]\n\n  2. Independent scaling along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          axis: 'channel'\n          gain: [1.0, 2.0, 3.0]\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(\n    ...         kwargs=ScaleLinearAlongAxisKwargs(\n    ...             axis=AxisId(\"channel\"),\n    ...             gain=[1.0, 2.0, 3.0],\n    ...         )\n    ...     )\n    ... ]",
      "properties": {
        "id": {
          "const": "scale_linear",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/ScaleLinearKwargs"
            },
            {
              "$ref": "#/$defs/ScaleLinearAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ScaleLinearDescr",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "gain": {
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain",
          "type": "number"
        },
        "offset": {
          "default": 0.0,
          "description": "additive term",
          "title": "Offset",
          "type": "number"
        }
      },
      "title": "model.v0_5.ScaleLinearKwargs",
      "type": "object"
    },
    "ScaleMeanVarianceDescr": {
      "additionalProperties": false,
      "description": "Scale a tensor's data distribution to match another tensor's mean/std.\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
      "properties": {
        "id": {
          "const": "scale_mean_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleMeanVarianceKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ScaleMeanVarianceDescr",
      "type": "object"
    },
    "ScaleMeanVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleMeanVarianceKwargs`",
      "properties": {
        "reference_tensor": {
          "description": "Name of tensor to match.",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability:\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "reference_tensor"
      ],
      "title": "model.v0_5.ScaleMeanVarianceKwargs",
      "type": "object"
    },
    "ScaleRangeDescr": {
      "additionalProperties": false,
      "description": "Scale with percentiles.\n\nExamples:\n1. Scale linearly to map 5th percentile to 0 and 99.8th percentile to 1.0\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...     ScaleRangeDescr(\n    ...         kwargs=ScaleRangeKwargs(\n    ...           axes= (AxisId('y'), AxisId('x')),\n    ...           max_percentile= 99.8,\n    ...           min_percentile= 5.0,\n    ...         )\n    ...     )\n    ... ]\n\n  2. Combine the above scaling with additional clipping to clip values outside the range given by the percentiles.\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n              - id: scale_range\n       - id: clip\n         kwargs:\n          min: 0.0\n          max: 1.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...   ScaleRangeDescr(\n    ...     kwargs=ScaleRangeKwargs(\n    ...       axes= (AxisId('y'), AxisId('x')),\n    ...       max_percentile= 99.8,\n    ...       min_percentile= 5.0,\n    ...     )\n    ...   ),\n    ...   ClipDescr(\n    ...     kwargs=ClipKwargs(\n    ...       min=0.0,\n    ...       max=1.0,\n    ...     )\n    ...   ),\n    ... ]",
      "properties": {
        "id": {
          "const": "scale_range",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleRangeKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.ScaleRangeDescr",
      "type": "object"
    },
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the \"batch\" axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "min_percentile": {
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "exclusiveMaximum": 100,
          "minimum": 0,
          "title": "Min Percentile",
          "type": "number"
        },
        "max_percentile": {
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "exclusiveMinimum": 1,
          "maximum": 100,
          "title": "Max Percentile",
          "type": "number"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "maxLength": 32,
              "minLength": 1,
              "title": "TensorId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor ID to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.",
          "title": "Reference Tensor"
        }
      },
      "title": "model.v0_5.ScaleRangeKwargs",
      "type": "object"
    },
    "SigmoidDescr": {
      "additionalProperties": false,
      "description": "The logistic sigmoid function, a.k.a. expit function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: sigmoid\n    ```\n- in Python:\n    >>> postprocessing = [SigmoidDescr()]",
      "properties": {
        "id": {
          "const": "sigmoid",
          "title": "Id",
          "type": "string"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.SigmoidDescr",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn unisotropic input image of w*h=100*49 pixels depicts a phsical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    },
    "SoftmaxDescr": {
      "additionalProperties": false,
      "description": "The softmax function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: softmax\n        kwargs:\n          axis: channel\n    ```\n- in Python:\n    >>> postprocessing = [SoftmaxDescr(kwargs=SoftmaxKwargs(axis=AxisId(\"channel\")))]",
      "properties": {
        "id": {
          "const": "softmax",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/SoftmaxKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.SoftmaxDescr",
      "type": "object"
    },
    "SoftmaxKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `SoftmaxDescr`",
      "properties": {
        "axis": {
          "default": "channel",
          "description": "The axis to apply the softmax function along.\nNote:\n    Defaults to 'channel' axis\n    (which may not exist, in which case\n    a different axis id has to be specified).",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "title": "model.v0_5.SoftmaxKwargs",
      "type": "object"
    },
    "SpaceInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceInputAxis",
      "type": "object"
    },
    "SpaceOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceOutputAxis",
      "type": "object"
    },
    "SpaceOutputAxisWithHalo": {
      "additionalProperties": false,
      "properties": {
        "halo": {
          "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
          "minimum": 1,
          "title": "Halo",
          "type": "integer"
        },
        "size": {
          "$ref": "#/$defs/SizeReference",
          "description": "reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ]
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "halo",
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceOutputAxisWithHalo",
      "type": "object"
    },
    "TensorflowJsWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "tensorflow_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the TensorFlow library used."
        }
      },
      "required": [
        "source",
        "tensorflow_version"
      ],
      "title": "model.v0_5.TensorflowJsWeightsDescr",
      "type": "object"
    },
    "TensorflowSavedModelBundleWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "tensorflow_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the TensorFlow library used."
        },
        "dependencies": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr",
              "examples": [
                {
                  "source": "environment.yaml"
                }
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Custom dependencies beyond tensorflow.\nShould include tensorflow and any version pinning has to be compatible with **tensorflow_version**."
        }
      },
      "required": [
        "source",
        "tensorflow_version"
      ],
      "title": "model.v0_5.TensorflowSavedModelBundleWeightsDescr",
      "type": "object"
    },
    "TimeInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeInputAxis",
      "type": "object"
    },
    "TimeOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeOutputAxis",
      "type": "object"
    },
    "TimeOutputAxisWithHalo": {
      "additionalProperties": false,
      "properties": {
        "halo": {
          "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
          "minimum": 1,
          "title": "Halo",
          "type": "integer"
        },
        "size": {
          "$ref": "#/$defs/SizeReference",
          "description": "reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ]
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "halo",
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeOutputAxisWithHalo",
      "type": "object"
    },
    "TorchscriptWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "pytorch_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the PyTorch library used."
        }
      },
      "required": [
        "source",
        "pytorch_version"
      ],
      "title": "model.v0_5.TorchscriptWeightsDescr",
      "type": "object"
    },
    "Uploader": {
      "additionalProperties": false,
      "properties": {
        "email": {
          "description": "Email",
          "format": "email",
          "title": "Email",
          "type": "string"
        },
        "name": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "name",
          "title": "Name"
        }
      },
      "required": [
        "email"
      ],
      "title": "generic.v0_2.Uploader",
      "type": "object"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    },
    "WeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "keras_hdf5": {
          "anyOf": [
            {
              "$ref": "#/$defs/KerasHdf5WeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "onnx": {
          "anyOf": [
            {
              "$ref": "#/$defs/OnnxWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "pytorch_state_dict": {
          "anyOf": [
            {
              "$ref": "#/$defs/PytorchStateDictWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "tensorflow_js": {
          "anyOf": [
            {
              "$ref": "#/$defs/TensorflowJsWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "tensorflow_saved_model_bundle": {
          "anyOf": [
            {
              "$ref": "#/$defs/TensorflowSavedModelBundleWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        },
        "torchscript": {
          "anyOf": [
            {
              "$ref": "#/$defs/TorchscriptWeightsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null
        }
      },
      "title": "model.v0_5.WeightsDescr",
      "type": "object"
    },
    "YamlValue": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "format": "date",
          "type": "string"
        },
        {
          "format": "date-time",
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        },
        {
          "type": "string"
        },
        {
          "items": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "array"
        },
        {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "object"
        },
        {
          "type": "null"
        }
      ]
    },
    "ZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract mean and divide by variance.\n\nExamples:\n    Subtract tensor mean and variance\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: zero_mean_unit_variance\n    ```\n    - in Python\n    >>> preprocessing = [ZeroMeanUnitVarianceDescr()]",
      "properties": {
        "id": {
          "const": "zero_mean_unit_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.ZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize each sample independently leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "title": "model.v0_5.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    },
    "bioimageio__spec__dataset__v0_2__DatasetDescr": {
      "additionalProperties": false,
      "description": "A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage\nprocessing.",
      "properties": {
        "name": {
          "description": "A human-friendly name of the resource description",
          "minLength": 1,
          "title": "Name",
          "type": "string"
        },
        "description": {
          "title": "Description",
          "type": "string"
        },
        "covers": {
          "description": "Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff')",
          "examples": [
            [
              "cover.png"
            ]
          ],
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Covers",
          "type": "array"
        },
        "id_emoji": {
          "anyOf": [
            {
              "examples": [
                "\ud83e\udd88",
                "\ud83e\udda5"
              ],
              "maxLength": 1,
              "minLength": 1,
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "UTF-8 emoji for display alongside the `id`.",
          "title": "Id Emoji"
        },
        "authors": {
          "description": "The authors are the creators of the RDF and the primary points of contact.",
          "items": {
            "$ref": "#/$defs/bioimageio__spec__generic__v0_2__Author"
          },
          "title": "Authors",
          "type": "array"
        },
        "attachments": {
          "anyOf": [
            {
              "$ref": "#/$defs/AttachmentsDescr"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "file and other attachments"
        },
        "cite": {
          "description": "citations",
          "items": {
            "$ref": "#/$defs/bioimageio__spec__generic__v0_2__CiteEntry"
          },
          "title": "Cite",
          "type": "array"
        },
        "config": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "A field for custom configuration that can contain any keys not present in the RDF spec.\nThis means you should not store, for example, a github repo URL in `config` since we already have the\n`git_repo` field defined in the spec.\nKeys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,\nit is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,\nfor example:\n```yaml\nconfig:\n    bioimageio:  # here is the domain name\n        my_custom_key: 3837283\n        another_key:\n            nested: value\n    imagej:       # config specific to ImageJ\n        macro_dir: path/to/macro/file\n```\nIf possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.\nYou may want to list linked files additionally under `attachments` to include them when packaging a resource\n(packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains\nan altered rdf.yaml file with local references to the downloaded files)",
          "examples": [
            {
              "bioimageio": {
                "another_key": {
                  "nested": "value"
                },
                "my_custom_key": 3837283
              },
              "imagej": {
                "macro_dir": "path/to/macro/file"
              }
            }
          ],
          "title": "Config",
          "type": "object"
        },
        "download_url": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL to download the resource from (deprecated)",
          "title": "Download Url"
        },
        "git_repo": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A URL to the Git repository where the resource is being developed.",
          "examples": [
            "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
          ],
          "title": "Git Repo"
        },
        "icon": {
          "anyOf": [
            {
              "maxLength": 2,
              "minLength": 1,
              "type": "string"
            },
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An icon for illustration",
          "title": "Icon"
        },
        "links": {
          "description": "IDs of other bioimage.io resources",
          "examples": [
            [
              "ilastik/ilastik",
              "deepimagej/deepimagej",
              "zero/notebook_u-net_3d_zerocostdl4mic"
            ]
          ],
          "items": {
            "type": "string"
          },
          "title": "Links",
          "type": "array"
        },
        "uploader": {
          "anyOf": [
            {
              "$ref": "#/$defs/Uploader"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The person who uploaded the model (e.g. to bioimage.io)"
        },
        "maintainers": {
          "description": "Maintainers of this resource.\nIf not specified `authors` are maintainers and at least some of them should specify their `github_user` name",
          "items": {
            "$ref": "#/$defs/bioimageio__spec__generic__v0_2__Maintainer"
          },
          "title": "Maintainers",
          "type": "array"
        },
        "rdf_source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from.\nDo not set this field in a YAML file.",
          "title": "Rdf Source"
        },
        "tags": {
          "description": "Associated tags",
          "examples": [
            [
              "unet2d",
              "pytorch",
              "nucleus",
              "segmentation",
              "dsb2018"
            ]
          ],
          "items": {
            "type": "string"
          },
          "title": "Tags",
          "type": "array"
        },
        "version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The version of the resource following SemVer 2.0."
        },
        "version_number": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "version number (n-th published version, not the semantic version)",
          "title": "Version Number"
        },
        "format_version": {
          "const": "0.2.4",
          "description": "The format version of this resource specification\n(not the `version` of the resource description)\nWhen creating a new resource always use the latest micro/patch version described here.\nThe `format_version` is important for any consumer software to understand how to parse the fields.",
          "title": "Format Version",
          "type": "string"
        },
        "badges": {
          "description": "badges associated with this resource",
          "items": {
            "$ref": "#/$defs/BadgeDescr"
          },
          "title": "Badges",
          "type": "array"
        },
        "documentation": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL or relative path to a markdown file with additional documentation.\nThe recommended documentation file name is `README.md`. An `.md` suffix is mandatory.",
          "examples": [
            "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
            "README.md"
          ],
          "title": "Documentation"
        },
        "license": {
          "anyOf": [
            {
              "enum": [
                "0BSD",
                "AAL",
                "Abstyles",
                "AdaCore-doc",
                "Adobe-2006",
                "Adobe-Display-PostScript",
                "Adobe-Glyph",
                "Adobe-Utopia",
                "ADSL",
                "AFL-1.1",
                "AFL-1.2",
                "AFL-2.0",
                "AFL-2.1",
                "AFL-3.0",
                "Afmparse",
                "AGPL-1.0-only",
                "AGPL-1.0-or-later",
                "AGPL-3.0-only",
                "AGPL-3.0-or-later",
                "Aladdin",
                "AMDPLPA",
                "AML",
                "AML-glslang",
                "AMPAS",
                "ANTLR-PD",
                "ANTLR-PD-fallback",
                "Apache-1.0",
                "Apache-1.1",
                "Apache-2.0",
                "APAFML",
                "APL-1.0",
                "App-s2p",
                "APSL-1.0",
                "APSL-1.1",
                "APSL-1.2",
                "APSL-2.0",
                "Arphic-1999",
                "Artistic-1.0",
                "Artistic-1.0-cl8",
                "Artistic-1.0-Perl",
                "Artistic-2.0",
                "ASWF-Digital-Assets-1.0",
                "ASWF-Digital-Assets-1.1",
                "Baekmuk",
                "Bahyph",
                "Barr",
                "bcrypt-Solar-Designer",
                "Beerware",
                "Bitstream-Charter",
                "Bitstream-Vera",
                "BitTorrent-1.0",
                "BitTorrent-1.1",
                "blessing",
                "BlueOak-1.0.0",
                "Boehm-GC",
                "Borceux",
                "Brian-Gladman-2-Clause",
                "Brian-Gladman-3-Clause",
                "BSD-1-Clause",
                "BSD-2-Clause",
                "BSD-2-Clause-Darwin",
                "BSD-2-Clause-Patent",
                "BSD-2-Clause-Views",
                "BSD-3-Clause",
                "BSD-3-Clause-acpica",
                "BSD-3-Clause-Attribution",
                "BSD-3-Clause-Clear",
                "BSD-3-Clause-flex",
                "BSD-3-Clause-HP",
                "BSD-3-Clause-LBNL",
                "BSD-3-Clause-Modification",
                "BSD-3-Clause-No-Military-License",
                "BSD-3-Clause-No-Nuclear-License",
                "BSD-3-Clause-No-Nuclear-License-2014",
                "BSD-3-Clause-No-Nuclear-Warranty",
                "BSD-3-Clause-Open-MPI",
                "BSD-3-Clause-Sun",
                "BSD-4-Clause",
                "BSD-4-Clause-Shortened",
                "BSD-4-Clause-UC",
                "BSD-4.3RENO",
                "BSD-4.3TAHOE",
                "BSD-Advertising-Acknowledgement",
                "BSD-Attribution-HPND-disclaimer",
                "BSD-Inferno-Nettverk",
                "BSD-Protection",
                "BSD-Source-beginning-file",
                "BSD-Source-Code",
                "BSD-Systemics",
                "BSD-Systemics-W3Works",
                "BSL-1.0",
                "BUSL-1.1",
                "bzip2-1.0.6",
                "C-UDA-1.0",
                "CAL-1.0",
                "CAL-1.0-Combined-Work-Exception",
                "Caldera",
                "Caldera-no-preamble",
                "CATOSL-1.1",
                "CC-BY-1.0",
                "CC-BY-2.0",
                "CC-BY-2.5",
                "CC-BY-2.5-AU",
                "CC-BY-3.0",
                "CC-BY-3.0-AT",
                "CC-BY-3.0-AU",
                "CC-BY-3.0-DE",
                "CC-BY-3.0-IGO",
                "CC-BY-3.0-NL",
                "CC-BY-3.0-US",
                "CC-BY-4.0",
                "CC-BY-NC-1.0",
                "CC-BY-NC-2.0",
                "CC-BY-NC-2.5",
                "CC-BY-NC-3.0",
                "CC-BY-NC-3.0-DE",
                "CC-BY-NC-4.0",
                "CC-BY-NC-ND-1.0",
                "CC-BY-NC-ND-2.0",
                "CC-BY-NC-ND-2.5",
                "CC-BY-NC-ND-3.0",
                "CC-BY-NC-ND-3.0-DE",
                "CC-BY-NC-ND-3.0-IGO",
                "CC-BY-NC-ND-4.0",
                "CC-BY-NC-SA-1.0",
                "CC-BY-NC-SA-2.0",
                "CC-BY-NC-SA-2.0-DE",
                "CC-BY-NC-SA-2.0-FR",
                "CC-BY-NC-SA-2.0-UK",
                "CC-BY-NC-SA-2.5",
                "CC-BY-NC-SA-3.0",
                "CC-BY-NC-SA-3.0-DE",
                "CC-BY-NC-SA-3.0-IGO",
                "CC-BY-NC-SA-4.0",
                "CC-BY-ND-1.0",
                "CC-BY-ND-2.0",
                "CC-BY-ND-2.5",
                "CC-BY-ND-3.0",
                "CC-BY-ND-3.0-DE",
                "CC-BY-ND-4.0",
                "CC-BY-SA-1.0",
                "CC-BY-SA-2.0",
                "CC-BY-SA-2.0-UK",
                "CC-BY-SA-2.1-JP",
                "CC-BY-SA-2.5",
                "CC-BY-SA-3.0",
                "CC-BY-SA-3.0-AT",
                "CC-BY-SA-3.0-DE",
                "CC-BY-SA-3.0-IGO",
                "CC-BY-SA-4.0",
                "CC-PDDC",
                "CC0-1.0",
                "CDDL-1.0",
                "CDDL-1.1",
                "CDL-1.0",
                "CDLA-Permissive-1.0",
                "CDLA-Permissive-2.0",
                "CDLA-Sharing-1.0",
                "CECILL-1.0",
                "CECILL-1.1",
                "CECILL-2.0",
                "CECILL-2.1",
                "CECILL-B",
                "CECILL-C",
                "CERN-OHL-1.1",
                "CERN-OHL-1.2",
                "CERN-OHL-P-2.0",
                "CERN-OHL-S-2.0",
                "CERN-OHL-W-2.0",
                "CFITSIO",
                "check-cvs",
                "checkmk",
                "ClArtistic",
                "Clips",
                "CMU-Mach",
                "CMU-Mach-nodoc",
                "CNRI-Jython",
                "CNRI-Python",
                "CNRI-Python-GPL-Compatible",
                "COIL-1.0",
                "Community-Spec-1.0",
                "Condor-1.1",
                "copyleft-next-0.3.0",
                "copyleft-next-0.3.1",
                "Cornell-Lossless-JPEG",
                "CPAL-1.0",
                "CPL-1.0",
                "CPOL-1.02",
                "Cronyx",
                "Crossword",
                "CrystalStacker",
                "CUA-OPL-1.0",
                "Cube",
                "curl",
                "D-FSL-1.0",
                "DEC-3-Clause",
                "diffmark",
                "DL-DE-BY-2.0",
                "DL-DE-ZERO-2.0",
                "DOC",
                "Dotseqn",
                "DRL-1.0",
                "DRL-1.1",
                "DSDP",
                "dtoa",
                "dvipdfm",
                "ECL-1.0",
                "ECL-2.0",
                "EFL-1.0",
                "EFL-2.0",
                "eGenix",
                "Elastic-2.0",
                "Entessa",
                "EPICS",
                "EPL-1.0",
                "EPL-2.0",
                "ErlPL-1.1",
                "etalab-2.0",
                "EUDatagrid",
                "EUPL-1.0",
                "EUPL-1.1",
                "EUPL-1.2",
                "Eurosym",
                "Fair",
                "FBM",
                "FDK-AAC",
                "Ferguson-Twofish",
                "Frameworx-1.0",
                "FreeBSD-DOC",
                "FreeImage",
                "FSFAP",
                "FSFAP-no-warranty-disclaimer",
                "FSFUL",
                "FSFULLR",
                "FSFULLRWD",
                "FTL",
                "Furuseth",
                "fwlw",
                "GCR-docs",
                "GD",
                "GFDL-1.1-invariants-only",
                "GFDL-1.1-invariants-or-later",
                "GFDL-1.1-no-invariants-only",
                "GFDL-1.1-no-invariants-or-later",
                "GFDL-1.1-only",
                "GFDL-1.1-or-later",
                "GFDL-1.2-invariants-only",
                "GFDL-1.2-invariants-or-later",
                "GFDL-1.2-no-invariants-only",
                "GFDL-1.2-no-invariants-or-later",
                "GFDL-1.2-only",
                "GFDL-1.2-or-later",
                "GFDL-1.3-invariants-only",
                "GFDL-1.3-invariants-or-later",
                "GFDL-1.3-no-invariants-only",
                "GFDL-1.3-no-invariants-or-later",
                "GFDL-1.3-only",
                "GFDL-1.3-or-later",
                "Giftware",
                "GL2PS",
                "Glide",
                "Glulxe",
                "GLWTPL",
                "gnuplot",
                "GPL-1.0-only",
                "GPL-1.0-or-later",
                "GPL-2.0-only",
                "GPL-2.0-or-later",
                "GPL-3.0-only",
                "GPL-3.0-or-later",
                "Graphics-Gems",
                "gSOAP-1.3b",
                "gtkbook",
                "HaskellReport",
                "hdparm",
                "Hippocratic-2.1",
                "HP-1986",
                "HP-1989",
                "HPND",
                "HPND-DEC",
                "HPND-doc",
                "HPND-doc-sell",
                "HPND-export-US",
                "HPND-export-US-modify",
                "HPND-Fenneberg-Livingston",
                "HPND-INRIA-IMAG",
                "HPND-Kevlin-Henney",
                "HPND-Markus-Kuhn",
                "HPND-MIT-disclaimer",
                "HPND-Pbmplus",
                "HPND-sell-MIT-disclaimer-xserver",
                "HPND-sell-regexpr",
                "HPND-sell-variant",
                "HPND-sell-variant-MIT-disclaimer",
                "HPND-UC",
                "HTMLTIDY",
                "IBM-pibs",
                "ICU",
                "IEC-Code-Components-EULA",
                "IJG",
                "IJG-short",
                "ImageMagick",
                "iMatix",
                "Imlib2",
                "Info-ZIP",
                "Inner-Net-2.0",
                "Intel",
                "Intel-ACPI",
                "Interbase-1.0",
                "IPA",
                "IPL-1.0",
                "ISC",
                "ISC-Veillard",
                "Jam",
                "JasPer-2.0",
                "JPL-image",
                "JPNIC",
                "JSON",
                "Kastrup",
                "Kazlib",
                "Knuth-CTAN",
                "LAL-1.2",
                "LAL-1.3",
                "Latex2e",
                "Latex2e-translated-notice",
                "Leptonica",
                "LGPL-2.0-only",
                "LGPL-2.0-or-later",
                "LGPL-2.1-only",
                "LGPL-2.1-or-later",
                "LGPL-3.0-only",
                "LGPL-3.0-or-later",
                "LGPLLR",
                "Libpng",
                "libpng-2.0",
                "libselinux-1.0",
                "libtiff",
                "libutil-David-Nugent",
                "LiLiQ-P-1.1",
                "LiLiQ-R-1.1",
                "LiLiQ-Rplus-1.1",
                "Linux-man-pages-1-para",
                "Linux-man-pages-copyleft",
                "Linux-man-pages-copyleft-2-para",
                "Linux-man-pages-copyleft-var",
                "Linux-OpenIB",
                "LOOP",
                "LPD-document",
                "LPL-1.0",
                "LPL-1.02",
                "LPPL-1.0",
                "LPPL-1.1",
                "LPPL-1.2",
                "LPPL-1.3a",
                "LPPL-1.3c",
                "lsof",
                "Lucida-Bitmap-Fonts",
                "LZMA-SDK-9.11-to-9.20",
                "LZMA-SDK-9.22",
                "Mackerras-3-Clause",
                "Mackerras-3-Clause-acknowledgment",
                "magaz",
                "mailprio",
                "MakeIndex",
                "Martin-Birgmeier",
                "McPhee-slideshow",
                "metamail",
                "Minpack",
                "MirOS",
                "MIT",
                "MIT-0",
                "MIT-advertising",
                "MIT-CMU",
                "MIT-enna",
                "MIT-feh",
                "MIT-Festival",
                "MIT-Modern-Variant",
                "MIT-open-group",
                "MIT-testregex",
                "MIT-Wu",
                "MITNFA",
                "MMIXware",
                "Motosoto",
                "MPEG-SSG",
                "mpi-permissive",
                "mpich2",
                "MPL-1.0",
                "MPL-1.1",
                "MPL-2.0",
                "MPL-2.0-no-copyleft-exception",
                "mplus",
                "MS-LPL",
                "MS-PL",
                "MS-RL",
                "MTLL",
                "MulanPSL-1.0",
                "MulanPSL-2.0",
                "Multics",
                "Mup",
                "NAIST-2003",
                "NASA-1.3",
                "Naumen",
                "NBPL-1.0",
                "NCGL-UK-2.0",
                "NCSA",
                "Net-SNMP",
                "NetCDF",
                "Newsletr",
                "NGPL",
                "NICTA-1.0",
                "NIST-PD",
                "NIST-PD-fallback",
                "NIST-Software",
                "NLOD-1.0",
                "NLOD-2.0",
                "NLPL",
                "Nokia",
                "NOSL",
                "Noweb",
                "NPL-1.0",
                "NPL-1.1",
                "NPOSL-3.0",
                "NRL",
                "NTP",
                "NTP-0",
                "O-UDA-1.0",
                "OCCT-PL",
                "OCLC-2.0",
                "ODbL-1.0",
                "ODC-By-1.0",
                "OFFIS",
                "OFL-1.0",
                "OFL-1.0-no-RFN",
                "OFL-1.0-RFN",
                "OFL-1.1",
                "OFL-1.1-no-RFN",
                "OFL-1.1-RFN",
                "OGC-1.0",
                "OGDL-Taiwan-1.0",
                "OGL-Canada-2.0",
                "OGL-UK-1.0",
                "OGL-UK-2.0",
                "OGL-UK-3.0",
                "OGTSL",
                "OLDAP-1.1",
                "OLDAP-1.2",
                "OLDAP-1.3",
                "OLDAP-1.4",
                "OLDAP-2.0",
                "OLDAP-2.0.1",
                "OLDAP-2.1",
                "OLDAP-2.2",
                "OLDAP-2.2.1",
                "OLDAP-2.2.2",
                "OLDAP-2.3",
                "OLDAP-2.4",
                "OLDAP-2.5",
                "OLDAP-2.6",
                "OLDAP-2.7",
                "OLDAP-2.8",
                "OLFL-1.3",
                "OML",
                "OpenPBS-2.3",
                "OpenSSL",
                "OpenSSL-standalone",
                "OpenVision",
                "OPL-1.0",
                "OPL-UK-3.0",
                "OPUBL-1.0",
                "OSET-PL-2.1",
                "OSL-1.0",
                "OSL-1.1",
                "OSL-2.0",
                "OSL-2.1",
                "OSL-3.0",
                "PADL",
                "Parity-6.0.0",
                "Parity-7.0.0",
                "PDDL-1.0",
                "PHP-3.0",
                "PHP-3.01",
                "Pixar",
                "Plexus",
                "pnmstitch",
                "PolyForm-Noncommercial-1.0.0",
                "PolyForm-Small-Business-1.0.0",
                "PostgreSQL",
                "PSF-2.0",
                "psfrag",
                "psutils",
                "Python-2.0",
                "Python-2.0.1",
                "python-ldap",
                "Qhull",
                "QPL-1.0",
                "QPL-1.0-INRIA-2004",
                "radvd",
                "Rdisc",
                "RHeCos-1.1",
                "RPL-1.1",
                "RPL-1.5",
                "RPSL-1.0",
                "RSA-MD",
                "RSCPL",
                "Ruby",
                "SAX-PD",
                "SAX-PD-2.0",
                "Saxpath",
                "SCEA",
                "SchemeReport",
                "Sendmail",
                "Sendmail-8.23",
                "SGI-B-1.0",
                "SGI-B-1.1",
                "SGI-B-2.0",
                "SGI-OpenGL",
                "SGP4",
                "SHL-0.5",
                "SHL-0.51",
                "SimPL-2.0",
                "SISSL",
                "SISSL-1.2",
                "SL",
                "Sleepycat",
                "SMLNJ",
                "SMPPL",
                "SNIA",
                "snprintf",
                "softSurfer",
                "Soundex",
                "Spencer-86",
                "Spencer-94",
                "Spencer-99",
                "SPL-1.0",
                "ssh-keyscan",
                "SSH-OpenSSH",
                "SSH-short",
                "SSLeay-standalone",
                "SSPL-1.0",
                "SugarCRM-1.1.3",
                "Sun-PPP",
                "SunPro",
                "SWL",
                "swrule",
                "Symlinks",
                "TAPR-OHL-1.0",
                "TCL",
                "TCP-wrappers",
                "TermReadKey",
                "TGPPL-1.0",
                "TMate",
                "TORQUE-1.1",
                "TOSL",
                "TPDL",
                "TPL-1.0",
                "TTWL",
                "TTYP0",
                "TU-Berlin-1.0",
                "TU-Berlin-2.0",
                "UCAR",
                "UCL-1.0",
                "ulem",
                "UMich-Merit",
                "Unicode-3.0",
                "Unicode-DFS-2015",
                "Unicode-DFS-2016",
                "Unicode-TOU",
                "UnixCrypt",
                "Unlicense",
                "UPL-1.0",
                "URT-RLE",
                "Vim",
                "VOSTROM",
                "VSL-1.0",
                "W3C",
                "W3C-19980720",
                "W3C-20150513",
                "w3m",
                "Watcom-1.0",
                "Widget-Workshop",
                "Wsuipa",
                "WTFPL",
                "X11",
                "X11-distribute-modifications-variant",
                "Xdebug-1.03",
                "Xerox",
                "Xfig",
                "XFree86-1.1",
                "xinetd",
                "xkeyboard-config-Zinoviev",
                "xlock",
                "Xnet",
                "xpp",
                "XSkat",
                "YPL-1.0",
                "YPL-1.1",
                "Zed",
                "Zeeff",
                "Zend-2.0",
                "Zimbra-1.3",
                "Zimbra-1.4",
                "Zlib",
                "zlib-acknowledgement",
                "ZPL-1.1",
                "ZPL-2.0",
                "ZPL-2.1"
              ],
              "title": "LicenseId",
              "type": "string"
            },
            {
              "enum": [
                "AGPL-1.0",
                "AGPL-3.0",
                "BSD-2-Clause-FreeBSD",
                "BSD-2-Clause-NetBSD",
                "bzip2-1.0.5",
                "eCos-2.0",
                "GFDL-1.1",
                "GFDL-1.2",
                "GFDL-1.3",
                "GPL-1.0",
                "GPL-1.0+",
                "GPL-2.0",
                "GPL-2.0+",
                "GPL-2.0-with-autoconf-exception",
                "GPL-2.0-with-bison-exception",
                "GPL-2.0-with-classpath-exception",
                "GPL-2.0-with-font-exception",
                "GPL-2.0-with-GCC-exception",
                "GPL-3.0",
                "GPL-3.0+",
                "GPL-3.0-with-autoconf-exception",
                "GPL-3.0-with-GCC-exception",
                "LGPL-2.0",
                "LGPL-2.0+",
                "LGPL-2.1",
                "LGPL-2.1+",
                "LGPL-3.0",
                "LGPL-3.0+",
                "Nunit",
                "StandardML-NJ",
                "wxWindows"
              ],
              "title": "DeprecatedLicenseId",
              "type": "string"
            },
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A [SPDX license identifier](https://spdx.org/licenses/).\nWe do not support custom license beyond the SPDX license list, if you need that please\n[open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose\n) to discuss your intentions with the community.",
          "examples": [
            "CC0-1.0",
            "MIT",
            "BSD-2-Clause"
          ],
          "title": "License"
        },
        "type": {
          "const": "dataset",
          "title": "Type",
          "type": "string"
        },
        "id": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "DatasetId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "bioimage.io-wide unique resource identifier\nassigned by bioimage.io; version **un**specific.",
          "title": "Id"
        },
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "\"URL to the source of the dataset.",
          "title": "Source"
        }
      },
      "required": [
        "name",
        "description",
        "format_version",
        "type"
      ],
      "title": "dataset 0.2.4",
      "type": "object"
    },
    "bioimageio__spec__dataset__v0_3__DatasetDescr": {
      "additionalProperties": false,
      "description": "A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage\nprocessing.",
      "properties": {
        "name": {
          "description": "A human-friendly name of the resource description.\nMay only contains letters, digits, underscore, minus, parentheses and spaces.",
          "maxLength": 128,
          "minLength": 5,
          "title": "Name",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A string containing a brief description.",
          "maxLength": 1024,
          "title": "Description",
          "type": "string"
        },
        "covers": {
          "description": "Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1 or 1:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg')",
          "examples": [
            [
              "cover.png"
            ]
          ],
          "items": {
            "anyOf": [
              {
                "description": "A URL with the HTTP or HTTPS scheme.",
                "format": "uri",
                "maxLength": 2083,
                "minLength": 1,
                "title": "HttpUrl",
                "type": "string"
              },
              {
                "$ref": "#/$defs/RelativeFilePath"
              },
              {
                "format": "file-path",
                "title": "FilePath",
                "type": "string"
              }
            ]
          },
          "title": "Covers",
          "type": "array"
        },
        "id_emoji": {
          "anyOf": [
            {
              "examples": [
                "\ud83e\udd88",
                "\ud83e\udda5"
              ],
              "maxLength": 2,
              "minLength": 1,
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "UTF-8 emoji for display alongside the `id`.",
          "title": "Id Emoji"
        },
        "authors": {
          "description": "The authors are the creators of this resource description and the primary points of contact.",
          "items": {
            "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
          },
          "title": "Authors",
          "type": "array"
        },
        "attachments": {
          "description": "file attachments",
          "items": {
            "$ref": "#/$defs/FileDescr"
          },
          "title": "Attachments",
          "type": "array"
        },
        "cite": {
          "description": "citations",
          "items": {
            "$ref": "#/$defs/bioimageio__spec__generic__v0_3__CiteEntry"
          },
          "title": "Cite",
          "type": "array"
        },
        "license": {
          "anyOf": [
            {
              "enum": [
                "0BSD",
                "AAL",
                "Abstyles",
                "AdaCore-doc",
                "Adobe-2006",
                "Adobe-Display-PostScript",
                "Adobe-Glyph",
                "Adobe-Utopia",
                "ADSL",
                "AFL-1.1",
                "AFL-1.2",
                "AFL-2.0",
                "AFL-2.1",
                "AFL-3.0",
                "Afmparse",
                "AGPL-1.0-only",
                "AGPL-1.0-or-later",
                "AGPL-3.0-only",
                "AGPL-3.0-or-later",
                "Aladdin",
                "AMDPLPA",
                "AML",
                "AML-glslang",
                "AMPAS",
                "ANTLR-PD",
                "ANTLR-PD-fallback",
                "Apache-1.0",
                "Apache-1.1",
                "Apache-2.0",
                "APAFML",
                "APL-1.0",
                "App-s2p",
                "APSL-1.0",
                "APSL-1.1",
                "APSL-1.2",
                "APSL-2.0",
                "Arphic-1999",
                "Artistic-1.0",
                "Artistic-1.0-cl8",
                "Artistic-1.0-Perl",
                "Artistic-2.0",
                "ASWF-Digital-Assets-1.0",
                "ASWF-Digital-Assets-1.1",
                "Baekmuk",
                "Bahyph",
                "Barr",
                "bcrypt-Solar-Designer",
                "Beerware",
                "Bitstream-Charter",
                "Bitstream-Vera",
                "BitTorrent-1.0",
                "BitTorrent-1.1",
                "blessing",
                "BlueOak-1.0.0",
                "Boehm-GC",
                "Borceux",
                "Brian-Gladman-2-Clause",
                "Brian-Gladman-3-Clause",
                "BSD-1-Clause",
                "BSD-2-Clause",
                "BSD-2-Clause-Darwin",
                "BSD-2-Clause-Patent",
                "BSD-2-Clause-Views",
                "BSD-3-Clause",
                "BSD-3-Clause-acpica",
                "BSD-3-Clause-Attribution",
                "BSD-3-Clause-Clear",
                "BSD-3-Clause-flex",
                "BSD-3-Clause-HP",
                "BSD-3-Clause-LBNL",
                "BSD-3-Clause-Modification",
                "BSD-3-Clause-No-Military-License",
                "BSD-3-Clause-No-Nuclear-License",
                "BSD-3-Clause-No-Nuclear-License-2014",
                "BSD-3-Clause-No-Nuclear-Warranty",
                "BSD-3-Clause-Open-MPI",
                "BSD-3-Clause-Sun",
                "BSD-4-Clause",
                "BSD-4-Clause-Shortened",
                "BSD-4-Clause-UC",
                "BSD-4.3RENO",
                "BSD-4.3TAHOE",
                "BSD-Advertising-Acknowledgement",
                "BSD-Attribution-HPND-disclaimer",
                "BSD-Inferno-Nettverk",
                "BSD-Protection",
                "BSD-Source-beginning-file",
                "BSD-Source-Code",
                "BSD-Systemics",
                "BSD-Systemics-W3Works",
                "BSL-1.0",
                "BUSL-1.1",
                "bzip2-1.0.6",
                "C-UDA-1.0",
                "CAL-1.0",
                "CAL-1.0-Combined-Work-Exception",
                "Caldera",
                "Caldera-no-preamble",
                "CATOSL-1.1",
                "CC-BY-1.0",
                "CC-BY-2.0",
                "CC-BY-2.5",
                "CC-BY-2.5-AU",
                "CC-BY-3.0",
                "CC-BY-3.0-AT",
                "CC-BY-3.0-AU",
                "CC-BY-3.0-DE",
                "CC-BY-3.0-IGO",
                "CC-BY-3.0-NL",
                "CC-BY-3.0-US",
                "CC-BY-4.0",
                "CC-BY-NC-1.0",
                "CC-BY-NC-2.0",
                "CC-BY-NC-2.5",
                "CC-BY-NC-3.0",
                "CC-BY-NC-3.0-DE",
                "CC-BY-NC-4.0",
                "CC-BY-NC-ND-1.0",
                "CC-BY-NC-ND-2.0",
                "CC-BY-NC-ND-2.5",
                "CC-BY-NC-ND-3.0",
                "CC-BY-NC-ND-3.0-DE",
                "CC-BY-NC-ND-3.0-IGO",
                "CC-BY-NC-ND-4.0",
                "CC-BY-NC-SA-1.0",
                "CC-BY-NC-SA-2.0",
                "CC-BY-NC-SA-2.0-DE",
                "CC-BY-NC-SA-2.0-FR",
                "CC-BY-NC-SA-2.0-UK",
                "CC-BY-NC-SA-2.5",
                "CC-BY-NC-SA-3.0",
                "CC-BY-NC-SA-3.0-DE",
                "CC-BY-NC-SA-3.0-IGO",
                "CC-BY-NC-SA-4.0",
                "CC-BY-ND-1.0",
                "CC-BY-ND-2.0",
                "CC-BY-ND-2.5",
                "CC-BY-ND-3.0",
                "CC-BY-ND-3.0-DE",
                "CC-BY-ND-4.0",
                "CC-BY-SA-1.0",
                "CC-BY-SA-2.0",
                "CC-BY-SA-2.0-UK",
                "CC-BY-SA-2.1-JP",
                "CC-BY-SA-2.5",
                "CC-BY-SA-3.0",
                "CC-BY-SA-3.0-AT",
                "CC-BY-SA-3.0-DE",
                "CC-BY-SA-3.0-IGO",
                "CC-BY-SA-4.0",
                "CC-PDDC",
                "CC0-1.0",
                "CDDL-1.0",
                "CDDL-1.1",
                "CDL-1.0",
                "CDLA-Permissive-1.0",
                "CDLA-Permissive-2.0",
                "CDLA-Sharing-1.0",
                "CECILL-1.0",
                "CECILL-1.1",
                "CECILL-2.0",
                "CECILL-2.1",
                "CECILL-B",
                "CECILL-C",
                "CERN-OHL-1.1",
                "CERN-OHL-1.2",
                "CERN-OHL-P-2.0",
                "CERN-OHL-S-2.0",
                "CERN-OHL-W-2.0",
                "CFITSIO",
                "check-cvs",
                "checkmk",
                "ClArtistic",
                "Clips",
                "CMU-Mach",
                "CMU-Mach-nodoc",
                "CNRI-Jython",
                "CNRI-Python",
                "CNRI-Python-GPL-Compatible",
                "COIL-1.0",
                "Community-Spec-1.0",
                "Condor-1.1",
                "copyleft-next-0.3.0",
                "copyleft-next-0.3.1",
                "Cornell-Lossless-JPEG",
                "CPAL-1.0",
                "CPL-1.0",
                "CPOL-1.02",
                "Cronyx",
                "Crossword",
                "CrystalStacker",
                "CUA-OPL-1.0",
                "Cube",
                "curl",
                "D-FSL-1.0",
                "DEC-3-Clause",
                "diffmark",
                "DL-DE-BY-2.0",
                "DL-DE-ZERO-2.0",
                "DOC",
                "Dotseqn",
                "DRL-1.0",
                "DRL-1.1",
                "DSDP",
                "dtoa",
                "dvipdfm",
                "ECL-1.0",
                "ECL-2.0",
                "EFL-1.0",
                "EFL-2.0",
                "eGenix",
                "Elastic-2.0",
                "Entessa",
                "EPICS",
                "EPL-1.0",
                "EPL-2.0",
                "ErlPL-1.1",
                "etalab-2.0",
                "EUDatagrid",
                "EUPL-1.0",
                "EUPL-1.1",
                "EUPL-1.2",
                "Eurosym",
                "Fair",
                "FBM",
                "FDK-AAC",
                "Ferguson-Twofish",
                "Frameworx-1.0",
                "FreeBSD-DOC",
                "FreeImage",
                "FSFAP",
                "FSFAP-no-warranty-disclaimer",
                "FSFUL",
                "FSFULLR",
                "FSFULLRWD",
                "FTL",
                "Furuseth",
                "fwlw",
                "GCR-docs",
                "GD",
                "GFDL-1.1-invariants-only",
                "GFDL-1.1-invariants-or-later",
                "GFDL-1.1-no-invariants-only",
                "GFDL-1.1-no-invariants-or-later",
                "GFDL-1.1-only",
                "GFDL-1.1-or-later",
                "GFDL-1.2-invariants-only",
                "GFDL-1.2-invariants-or-later",
                "GFDL-1.2-no-invariants-only",
                "GFDL-1.2-no-invariants-or-later",
                "GFDL-1.2-only",
                "GFDL-1.2-or-later",
                "GFDL-1.3-invariants-only",
                "GFDL-1.3-invariants-or-later",
                "GFDL-1.3-no-invariants-only",
                "GFDL-1.3-no-invariants-or-later",
                "GFDL-1.3-only",
                "GFDL-1.3-or-later",
                "Giftware",
                "GL2PS",
                "Glide",
                "Glulxe",
                "GLWTPL",
                "gnuplot",
                "GPL-1.0-only",
                "GPL-1.0-or-later",
                "GPL-2.0-only",
                "GPL-2.0-or-later",
                "GPL-3.0-only",
                "GPL-3.0-or-later",
                "Graphics-Gems",
                "gSOAP-1.3b",
                "gtkbook",
                "HaskellReport",
                "hdparm",
                "Hippocratic-2.1",
                "HP-1986",
                "HP-1989",
                "HPND",
                "HPND-DEC",
                "HPND-doc",
                "HPND-doc-sell",
                "HPND-export-US",
                "HPND-export-US-modify",
                "HPND-Fenneberg-Livingston",
                "HPND-INRIA-IMAG",
                "HPND-Kevlin-Henney",
                "HPND-Markus-Kuhn",
                "HPND-MIT-disclaimer",
                "HPND-Pbmplus",
                "HPND-sell-MIT-disclaimer-xserver",
                "HPND-sell-regexpr",
                "HPND-sell-variant",
                "HPND-sell-variant-MIT-disclaimer",
                "HPND-UC",
                "HTMLTIDY",
                "IBM-pibs",
                "ICU",
                "IEC-Code-Components-EULA",
                "IJG",
                "IJG-short",
                "ImageMagick",
                "iMatix",
                "Imlib2",
                "Info-ZIP",
                "Inner-Net-2.0",
                "Intel",
                "Intel-ACPI",
                "Interbase-1.0",
                "IPA",
                "IPL-1.0",
                "ISC",
                "ISC-Veillard",
                "Jam",
                "JasPer-2.0",
                "JPL-image",
                "JPNIC",
                "JSON",
                "Kastrup",
                "Kazlib",
                "Knuth-CTAN",
                "LAL-1.2",
                "LAL-1.3",
                "Latex2e",
                "Latex2e-translated-notice",
                "Leptonica",
                "LGPL-2.0-only",
                "LGPL-2.0-or-later",
                "LGPL-2.1-only",
                "LGPL-2.1-or-later",
                "LGPL-3.0-only",
                "LGPL-3.0-or-later",
                "LGPLLR",
                "Libpng",
                "libpng-2.0",
                "libselinux-1.0",
                "libtiff",
                "libutil-David-Nugent",
                "LiLiQ-P-1.1",
                "LiLiQ-R-1.1",
                "LiLiQ-Rplus-1.1",
                "Linux-man-pages-1-para",
                "Linux-man-pages-copyleft",
                "Linux-man-pages-copyleft-2-para",
                "Linux-man-pages-copyleft-var",
                "Linux-OpenIB",
                "LOOP",
                "LPD-document",
                "LPL-1.0",
                "LPL-1.02",
                "LPPL-1.0",
                "LPPL-1.1",
                "LPPL-1.2",
                "LPPL-1.3a",
                "LPPL-1.3c",
                "lsof",
                "Lucida-Bitmap-Fonts",
                "LZMA-SDK-9.11-to-9.20",
                "LZMA-SDK-9.22",
                "Mackerras-3-Clause",
                "Mackerras-3-Clause-acknowledgment",
                "magaz",
                "mailprio",
                "MakeIndex",
                "Martin-Birgmeier",
                "McPhee-slideshow",
                "metamail",
                "Minpack",
                "MirOS",
                "MIT",
                "MIT-0",
                "MIT-advertising",
                "MIT-CMU",
                "MIT-enna",
                "MIT-feh",
                "MIT-Festival",
                "MIT-Modern-Variant",
                "MIT-open-group",
                "MIT-testregex",
                "MIT-Wu",
                "MITNFA",
                "MMIXware",
                "Motosoto",
                "MPEG-SSG",
                "mpi-permissive",
                "mpich2",
                "MPL-1.0",
                "MPL-1.1",
                "MPL-2.0",
                "MPL-2.0-no-copyleft-exception",
                "mplus",
                "MS-LPL",
                "MS-PL",
                "MS-RL",
                "MTLL",
                "MulanPSL-1.0",
                "MulanPSL-2.0",
                "Multics",
                "Mup",
                "NAIST-2003",
                "NASA-1.3",
                "Naumen",
                "NBPL-1.0",
                "NCGL-UK-2.0",
                "NCSA",
                "Net-SNMP",
                "NetCDF",
                "Newsletr",
                "NGPL",
                "NICTA-1.0",
                "NIST-PD",
                "NIST-PD-fallback",
                "NIST-Software",
                "NLOD-1.0",
                "NLOD-2.0",
                "NLPL",
                "Nokia",
                "NOSL",
                "Noweb",
                "NPL-1.0",
                "NPL-1.1",
                "NPOSL-3.0",
                "NRL",
                "NTP",
                "NTP-0",
                "O-UDA-1.0",
                "OCCT-PL",
                "OCLC-2.0",
                "ODbL-1.0",
                "ODC-By-1.0",
                "OFFIS",
                "OFL-1.0",
                "OFL-1.0-no-RFN",
                "OFL-1.0-RFN",
                "OFL-1.1",
                "OFL-1.1-no-RFN",
                "OFL-1.1-RFN",
                "OGC-1.0",
                "OGDL-Taiwan-1.0",
                "OGL-Canada-2.0",
                "OGL-UK-1.0",
                "OGL-UK-2.0",
                "OGL-UK-3.0",
                "OGTSL",
                "OLDAP-1.1",
                "OLDAP-1.2",
                "OLDAP-1.3",
                "OLDAP-1.4",
                "OLDAP-2.0",
                "OLDAP-2.0.1",
                "OLDAP-2.1",
                "OLDAP-2.2",
                "OLDAP-2.2.1",
                "OLDAP-2.2.2",
                "OLDAP-2.3",
                "OLDAP-2.4",
                "OLDAP-2.5",
                "OLDAP-2.6",
                "OLDAP-2.7",
                "OLDAP-2.8",
                "OLFL-1.3",
                "OML",
                "OpenPBS-2.3",
                "OpenSSL",
                "OpenSSL-standalone",
                "OpenVision",
                "OPL-1.0",
                "OPL-UK-3.0",
                "OPUBL-1.0",
                "OSET-PL-2.1",
                "OSL-1.0",
                "OSL-1.1",
                "OSL-2.0",
                "OSL-2.1",
                "OSL-3.0",
                "PADL",
                "Parity-6.0.0",
                "Parity-7.0.0",
                "PDDL-1.0",
                "PHP-3.0",
                "PHP-3.01",
                "Pixar",
                "Plexus",
                "pnmstitch",
                "PolyForm-Noncommercial-1.0.0",
                "PolyForm-Small-Business-1.0.0",
                "PostgreSQL",
                "PSF-2.0",
                "psfrag",
                "psutils",
                "Python-2.0",
                "Python-2.0.1",
                "python-ldap",
                "Qhull",
                "QPL-1.0",
                "QPL-1.0-INRIA-2004",
                "radvd",
                "Rdisc",
                "RHeCos-1.1",
                "RPL-1.1",
                "RPL-1.5",
                "RPSL-1.0",
                "RSA-MD",
                "RSCPL",
                "Ruby",
                "SAX-PD",
                "SAX-PD-2.0",
                "Saxpath",
                "SCEA",
                "SchemeReport",
                "Sendmail",
                "Sendmail-8.23",
                "SGI-B-1.0",
                "SGI-B-1.1",
                "SGI-B-2.0",
                "SGI-OpenGL",
                "SGP4",
                "SHL-0.5",
                "SHL-0.51",
                "SimPL-2.0",
                "SISSL",
                "SISSL-1.2",
                "SL",
                "Sleepycat",
                "SMLNJ",
                "SMPPL",
                "SNIA",
                "snprintf",
                "softSurfer",
                "Soundex",
                "Spencer-86",
                "Spencer-94",
                "Spencer-99",
                "SPL-1.0",
                "ssh-keyscan",
                "SSH-OpenSSH",
                "SSH-short",
                "SSLeay-standalone",
                "SSPL-1.0",
                "SugarCRM-1.1.3",
                "Sun-PPP",
                "SunPro",
                "SWL",
                "swrule",
                "Symlinks",
                "TAPR-OHL-1.0",
                "TCL",
                "TCP-wrappers",
                "TermReadKey",
                "TGPPL-1.0",
                "TMate",
                "TORQUE-1.1",
                "TOSL",
                "TPDL",
                "TPL-1.0",
                "TTWL",
                "TTYP0",
                "TU-Berlin-1.0",
                "TU-Berlin-2.0",
                "UCAR",
                "UCL-1.0",
                "ulem",
                "UMich-Merit",
                "Unicode-3.0",
                "Unicode-DFS-2015",
                "Unicode-DFS-2016",
                "Unicode-TOU",
                "UnixCrypt",
                "Unlicense",
                "UPL-1.0",
                "URT-RLE",
                "Vim",
                "VOSTROM",
                "VSL-1.0",
                "W3C",
                "W3C-19980720",
                "W3C-20150513",
                "w3m",
                "Watcom-1.0",
                "Widget-Workshop",
                "Wsuipa",
                "WTFPL",
                "X11",
                "X11-distribute-modifications-variant",
                "Xdebug-1.03",
                "Xerox",
                "Xfig",
                "XFree86-1.1",
                "xinetd",
                "xkeyboard-config-Zinoviev",
                "xlock",
                "Xnet",
                "xpp",
                "XSkat",
                "YPL-1.0",
                "YPL-1.1",
                "Zed",
                "Zeeff",
                "Zend-2.0",
                "Zimbra-1.3",
                "Zimbra-1.4",
                "Zlib",
                "zlib-acknowledgement",
                "ZPL-1.1",
                "ZPL-2.0",
                "ZPL-2.1"
              ],
              "title": "LicenseId",
              "type": "string"
            },
            {
              "enum": [
                "AGPL-1.0",
                "AGPL-3.0",
                "BSD-2-Clause-FreeBSD",
                "BSD-2-Clause-NetBSD",
                "bzip2-1.0.5",
                "eCos-2.0",
                "GFDL-1.1",
                "GFDL-1.2",
                "GFDL-1.3",
                "GPL-1.0",
                "GPL-1.0+",
                "GPL-2.0",
                "GPL-2.0+",
                "GPL-2.0-with-autoconf-exception",
                "GPL-2.0-with-bison-exception",
                "GPL-2.0-with-classpath-exception",
                "GPL-2.0-with-font-exception",
                "GPL-2.0-with-GCC-exception",
                "GPL-3.0",
                "GPL-3.0+",
                "GPL-3.0-with-autoconf-exception",
                "GPL-3.0-with-GCC-exception",
                "LGPL-2.0",
                "LGPL-2.0+",
                "LGPL-2.1",
                "LGPL-2.1+",
                "LGPL-3.0",
                "LGPL-3.0+",
                "Nunit",
                "StandardML-NJ",
                "wxWindows"
              ],
              "title": "DeprecatedLicenseId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A [SPDX license identifier](https://spdx.org/licenses/).\nWe do not support custom license beyond the SPDX license list, if you need that please\n[open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose)\nto discuss your intentions with the community.",
          "examples": [
            "CC0-1.0",
            "MIT",
            "BSD-2-Clause"
          ],
          "title": "License"
        },
        "git_repo": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A URL to the Git repository where the resource is being developed.",
          "examples": [
            "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
          ],
          "title": "Git Repo"
        },
        "icon": {
          "anyOf": [
            {
              "maxLength": 2,
              "minLength": 1,
              "type": "string"
            },
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An icon for illustration, e.g. on bioimage.io",
          "title": "Icon"
        },
        "links": {
          "description": "IDs of other bioimage.io resources",
          "examples": [
            [
              "ilastik/ilastik",
              "deepimagej/deepimagej",
              "zero/notebook_u-net_3d_zerocostdl4mic"
            ]
          ],
          "items": {
            "type": "string"
          },
          "title": "Links",
          "type": "array"
        },
        "uploader": {
          "anyOf": [
            {
              "$ref": "#/$defs/Uploader"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The person who uploaded the model (e.g. to bioimage.io)"
        },
        "maintainers": {
          "description": "Maintainers of this resource.\nIf not specified, `authors` are maintainers and at least some of them has to specify their `github_user` name",
          "items": {
            "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Maintainer"
          },
          "title": "Maintainers",
          "type": "array"
        },
        "tags": {
          "description": "Associated tags",
          "examples": [
            [
              "unet2d",
              "pytorch",
              "nucleus",
              "segmentation",
              "dsb2018"
            ]
          ],
          "items": {
            "type": "string"
          },
          "title": "Tags",
          "type": "array"
        },
        "version": {
          "anyOf": [
            {
              "$ref": "#/$defs/Version"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The version of the resource following SemVer 2.0."
        },
        "version_comment": {
          "anyOf": [
            {
              "maxLength": 512,
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A comment on the version of the resource.",
          "title": "Version Comment"
        },
        "format_version": {
          "const": "0.3.0",
          "description": "The **format** version of this resource specification",
          "title": "Format Version",
          "type": "string"
        },
        "documentation": {
          "anyOf": [
            {
              "anyOf": [
                {
                  "description": "A URL with the HTTP or HTTPS scheme.",
                  "format": "uri",
                  "maxLength": 2083,
                  "minLength": 1,
                  "title": "HttpUrl",
                  "type": "string"
                },
                {
                  "$ref": "#/$defs/RelativeFilePath"
                },
                {
                  "format": "file-path",
                  "title": "FilePath",
                  "type": "string"
                }
              ],
              "examples": [
                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
                "README.md"
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL or relative path to a markdown file encoded in UTF-8 with additional documentation.\nThe recommended documentation file name is `README.md`. An `.md` suffix is mandatory.",
          "title": "Documentation"
        },
        "badges": {
          "description": "badges associated with this resource",
          "items": {
            "$ref": "#/$defs/BadgeDescr"
          },
          "title": "Badges",
          "type": "array"
        },
        "config": {
          "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Config",
          "description": "A field for custom configuration that can contain any keys not present in the RDF spec.\nThis means you should not store, for example, a GitHub repo URL in `config` since there is a `git_repo` field.\nKeys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,\nit is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,\nfor example:\n```yaml\nconfig:\n    giraffe_neckometer:  # here is the domain name\n        length: 3837283\n        address:\n            home: zoo\n    imagej:              # config specific to ImageJ\n        macro_dir: path/to/macro/file\n```\nIf possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.\nYou may want to list linked files additionally under `attachments` to include them when packaging a resource.\n(Packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains\nan altered rdf.yaml file with local references to the downloaded files.)"
        },
        "type": {
          "const": "dataset",
          "title": "Type",
          "type": "string"
        },
        "id": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "DatasetId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "bioimage.io-wide unique resource identifier\nassigned by bioimage.io; version **un**specific.",
          "title": "Id"
        },
        "parent": {
          "anyOf": [
            {
              "minLength": 1,
              "title": "DatasetId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The description from which this one is derived",
          "title": "Parent"
        },
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL to the source of the dataset.",
          "title": "Source"
        }
      },
      "required": [
        "name",
        "format_version",
        "type"
      ],
      "title": "dataset 0.3.0",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_2__Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_2.Author",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_2__CiteEntry": {
      "additionalProperties": false,
      "properties": {
        "text": {
          "description": "free text description",
          "title": "Text",
          "type": "string"
        },
        "doi": {
          "anyOf": [
            {
              "description": "A digital object identifier, see https://www.doi.org/",
              "pattern": "^10\\.[0-9]{4}.+$",
              "title": "Doi",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A digital object identifier (DOI) is the preferred citation reference.\nSee https://www.doi.org/ for details. (alternatively specify `url`)",
          "title": "Doi"
        },
        "url": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL to cite (preferably specify a `doi` instead)",
          "title": "Url"
        }
      },
      "required": [
        "text"
      ],
      "title": "generic.v0_2.CiteEntry",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_2__Maintainer": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Name"
        },
        "github_user": {
          "title": "Github User",
          "type": "string"
        }
      },
      "required": [
        "github_user"
      ],
      "title": "generic.v0_2.Maintainer",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_3__Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_3__BioimageioConfig": {
      "additionalProperties": true,
      "description": "bioimage.io internal metadata.",
      "properties": {},
      "title": "generic.v0_3.BioimageioConfig",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_3__CiteEntry": {
      "additionalProperties": false,
      "description": "A citation that should be referenced in work using this resource.",
      "properties": {
        "text": {
          "description": "free text description",
          "title": "Text",
          "type": "string"
        },
        "doi": {
          "anyOf": [
            {
              "description": "A digital object identifier, see https://www.doi.org/",
              "pattern": "^10\\.[0-9]{4}.+$",
              "title": "Doi",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "A digital object identifier (DOI) is the preferred citation reference.\nSee https://www.doi.org/ for details.\nNote:\n    Either **doi** or **url** have to be specified.",
          "title": "Doi"
        },
        "url": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "URL to cite (preferably specify a **doi** instead/also).\nNote:\n    Either **doi** or **url** have to be specified.",
          "title": "Url"
        }
      },
      "required": [
        "text"
      ],
      "title": "generic.v0_3.CiteEntry",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_3__Config": {
      "additionalProperties": true,
      "description": "A place to store additional metadata (often tool specific).\n\nSuch additional metadata is typically set programmatically by the respective tool\nor by people with specific insights into the tool.\nIf you want to store additional metadata that does not match any of the other\nfields, think of a key unlikely to collide with anyone elses use-case/tool and save\nit here.\n\nPlease consider creating [an issue in the bioimageio.spec repository](https://github.com/bioimage-io/spec-bioimage-io/issues/new?template=Blank+issue)\nif you are not sure if an existing field could cover your use case\nor if you think such a field should exist.",
      "properties": {
        "bioimageio": {
          "$ref": "#/$defs/bioimageio__spec__generic__v0_3__BioimageioConfig"
        }
      },
      "title": "generic.v0_3.Config",
      "type": "object"
    },
    "bioimageio__spec__generic__v0_3__Maintainer": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Name"
        },
        "github_user": {
          "title": "Github User",
          "type": "string"
        }
      },
      "required": [
        "github_user"
      ],
      "title": "generic.v0_3.Maintainer",
      "type": "object"
    },
    "bioimageio__spec__model__v0_5__BioimageioConfig": {
      "additionalProperties": true,
      "properties": {
        "reproducibility_tolerance": {
          "default": [],
          "description": "Tolerances to allow when reproducing the model's test outputs\nfrom the model's test inputs.\nOnly the first entry matching tensor id and weights format is considered.",
          "items": {
            "$ref": "#/$defs/ReproducibilityTolerance"
          },
          "title": "Reproducibility Tolerance",
          "type": "array"
        }
      },
      "title": "model.v0_5.BioimageioConfig",
      "type": "object"
    },
    "bioimageio__spec__model__v0_5__Config": {
      "additionalProperties": true,
      "properties": {
        "bioimageio": {
          "$ref": "#/$defs/bioimageio__spec__model__v0_5__BioimageioConfig"
        }
      },
      "title": "model.v0_5.Config",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.\nThese fields are typically stored in a YAML file which we call a model resource description file (model RDF).",
  "properties": {
    "name": {
      "description": "A human-readable name of this model.\nIt should be no longer than 64 characters\nand may only contain letter, number, underscore, minus, parentheses and spaces.\nWe recommend choosing a name that refers to the model's task and image modality.",
      "maxLength": 128,
      "minLength": 5,
      "title": "Name",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A string containing a brief description.",
      "maxLength": 1024,
      "title": "Description",
      "type": "string"
    },
    "covers": {
      "description": "Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1 or 1:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg')",
      "examples": [
        [
          "cover.png"
        ]
      ],
      "items": {
        "anyOf": [
          {
            "description": "A URL with the HTTP or HTTPS scheme.",
            "format": "uri",
            "maxLength": 2083,
            "minLength": 1,
            "title": "HttpUrl",
            "type": "string"
          },
          {
            "$ref": "#/$defs/RelativeFilePath"
          },
          {
            "format": "file-path",
            "title": "FilePath",
            "type": "string"
          }
        ]
      },
      "title": "Covers",
      "type": "array"
    },
    "id_emoji": {
      "anyOf": [
        {
          "examples": [
            "\ud83e\udd88",
            "\ud83e\udda5"
          ],
          "maxLength": 2,
          "minLength": 1,
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "UTF-8 emoji for display alongside the `id`.",
      "title": "Id Emoji"
    },
    "authors": {
      "description": "The authors are the creators of the model RDF and the primary points of contact.",
      "items": {
        "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
      },
      "title": "Authors",
      "type": "array"
    },
    "attachments": {
      "description": "file attachments",
      "items": {
        "$ref": "#/$defs/FileDescr"
      },
      "title": "Attachments",
      "type": "array"
    },
    "cite": {
      "description": "citations",
      "items": {
        "$ref": "#/$defs/bioimageio__spec__generic__v0_3__CiteEntry"
      },
      "title": "Cite",
      "type": "array"
    },
    "license": {
      "anyOf": [
        {
          "enum": [
            "0BSD",
            "AAL",
            "Abstyles",
            "AdaCore-doc",
            "Adobe-2006",
            "Adobe-Display-PostScript",
            "Adobe-Glyph",
            "Adobe-Utopia",
            "ADSL",
            "AFL-1.1",
            "AFL-1.2",
            "AFL-2.0",
            "AFL-2.1",
            "AFL-3.0",
            "Afmparse",
            "AGPL-1.0-only",
            "AGPL-1.0-or-later",
            "AGPL-3.0-only",
            "AGPL-3.0-or-later",
            "Aladdin",
            "AMDPLPA",
            "AML",
            "AML-glslang",
            "AMPAS",
            "ANTLR-PD",
            "ANTLR-PD-fallback",
            "Apache-1.0",
            "Apache-1.1",
            "Apache-2.0",
            "APAFML",
            "APL-1.0",
            "App-s2p",
            "APSL-1.0",
            "APSL-1.1",
            "APSL-1.2",
            "APSL-2.0",
            "Arphic-1999",
            "Artistic-1.0",
            "Artistic-1.0-cl8",
            "Artistic-1.0-Perl",
            "Artistic-2.0",
            "ASWF-Digital-Assets-1.0",
            "ASWF-Digital-Assets-1.1",
            "Baekmuk",
            "Bahyph",
            "Barr",
            "bcrypt-Solar-Designer",
            "Beerware",
            "Bitstream-Charter",
            "Bitstream-Vera",
            "BitTorrent-1.0",
            "BitTorrent-1.1",
            "blessing",
            "BlueOak-1.0.0",
            "Boehm-GC",
            "Borceux",
            "Brian-Gladman-2-Clause",
            "Brian-Gladman-3-Clause",
            "BSD-1-Clause",
            "BSD-2-Clause",
            "BSD-2-Clause-Darwin",
            "BSD-2-Clause-Patent",
            "BSD-2-Clause-Views",
            "BSD-3-Clause",
            "BSD-3-Clause-acpica",
            "BSD-3-Clause-Attribution",
            "BSD-3-Clause-Clear",
            "BSD-3-Clause-flex",
            "BSD-3-Clause-HP",
            "BSD-3-Clause-LBNL",
            "BSD-3-Clause-Modification",
            "BSD-3-Clause-No-Military-License",
            "BSD-3-Clause-No-Nuclear-License",
            "BSD-3-Clause-No-Nuclear-License-2014",
            "BSD-3-Clause-No-Nuclear-Warranty",
            "BSD-3-Clause-Open-MPI",
            "BSD-3-Clause-Sun",
            "BSD-4-Clause",
            "BSD-4-Clause-Shortened",
            "BSD-4-Clause-UC",
            "BSD-4.3RENO",
            "BSD-4.3TAHOE",
            "BSD-Advertising-Acknowledgement",
            "BSD-Attribution-HPND-disclaimer",
            "BSD-Inferno-Nettverk",
            "BSD-Protection",
            "BSD-Source-beginning-file",
            "BSD-Source-Code",
            "BSD-Systemics",
            "BSD-Systemics-W3Works",
            "BSL-1.0",
            "BUSL-1.1",
            "bzip2-1.0.6",
            "C-UDA-1.0",
            "CAL-1.0",
            "CAL-1.0-Combined-Work-Exception",
            "Caldera",
            "Caldera-no-preamble",
            "CATOSL-1.1",
            "CC-BY-1.0",
            "CC-BY-2.0",
            "CC-BY-2.5",
            "CC-BY-2.5-AU",
            "CC-BY-3.0",
            "CC-BY-3.0-AT",
            "CC-BY-3.0-AU",
            "CC-BY-3.0-DE",
            "CC-BY-3.0-IGO",
            "CC-BY-3.0-NL",
            "CC-BY-3.0-US",
            "CC-BY-4.0",
            "CC-BY-NC-1.0",
            "CC-BY-NC-2.0",
            "CC-BY-NC-2.5",
            "CC-BY-NC-3.0",
            "CC-BY-NC-3.0-DE",
            "CC-BY-NC-4.0",
            "CC-BY-NC-ND-1.0",
            "CC-BY-NC-ND-2.0",
            "CC-BY-NC-ND-2.5",
            "CC-BY-NC-ND-3.0",
            "CC-BY-NC-ND-3.0-DE",
            "CC-BY-NC-ND-3.0-IGO",
            "CC-BY-NC-ND-4.0",
            "CC-BY-NC-SA-1.0",
            "CC-BY-NC-SA-2.0",
            "CC-BY-NC-SA-2.0-DE",
            "CC-BY-NC-SA-2.0-FR",
            "CC-BY-NC-SA-2.0-UK",
            "CC-BY-NC-SA-2.5",
            "CC-BY-NC-SA-3.0",
            "CC-BY-NC-SA-3.0-DE",
            "CC-BY-NC-SA-3.0-IGO",
            "CC-BY-NC-SA-4.0",
            "CC-BY-ND-1.0",
            "CC-BY-ND-2.0",
            "CC-BY-ND-2.5",
            "CC-BY-ND-3.0",
            "CC-BY-ND-3.0-DE",
            "CC-BY-ND-4.0",
            "CC-BY-SA-1.0",
            "CC-BY-SA-2.0",
            "CC-BY-SA-2.0-UK",
            "CC-BY-SA-2.1-JP",
            "CC-BY-SA-2.5",
            "CC-BY-SA-3.0",
            "CC-BY-SA-3.0-AT",
            "CC-BY-SA-3.0-DE",
            "CC-BY-SA-3.0-IGO",
            "CC-BY-SA-4.0",
            "CC-PDDC",
            "CC0-1.0",
            "CDDL-1.0",
            "CDDL-1.1",
            "CDL-1.0",
            "CDLA-Permissive-1.0",
            "CDLA-Permissive-2.0",
            "CDLA-Sharing-1.0",
            "CECILL-1.0",
            "CECILL-1.1",
            "CECILL-2.0",
            "CECILL-2.1",
            "CECILL-B",
            "CECILL-C",
            "CERN-OHL-1.1",
            "CERN-OHL-1.2",
            "CERN-OHL-P-2.0",
            "CERN-OHL-S-2.0",
            "CERN-OHL-W-2.0",
            "CFITSIO",
            "check-cvs",
            "checkmk",
            "ClArtistic",
            "Clips",
            "CMU-Mach",
            "CMU-Mach-nodoc",
            "CNRI-Jython",
            "CNRI-Python",
            "CNRI-Python-GPL-Compatible",
            "COIL-1.0",
            "Community-Spec-1.0",
            "Condor-1.1",
            "copyleft-next-0.3.0",
            "copyleft-next-0.3.1",
            "Cornell-Lossless-JPEG",
            "CPAL-1.0",
            "CPL-1.0",
            "CPOL-1.02",
            "Cronyx",
            "Crossword",
            "CrystalStacker",
            "CUA-OPL-1.0",
            "Cube",
            "curl",
            "D-FSL-1.0",
            "DEC-3-Clause",
            "diffmark",
            "DL-DE-BY-2.0",
            "DL-DE-ZERO-2.0",
            "DOC",
            "Dotseqn",
            "DRL-1.0",
            "DRL-1.1",
            "DSDP",
            "dtoa",
            "dvipdfm",
            "ECL-1.0",
            "ECL-2.0",
            "EFL-1.0",
            "EFL-2.0",
            "eGenix",
            "Elastic-2.0",
            "Entessa",
            "EPICS",
            "EPL-1.0",
            "EPL-2.0",
            "ErlPL-1.1",
            "etalab-2.0",
            "EUDatagrid",
            "EUPL-1.0",
            "EUPL-1.1",
            "EUPL-1.2",
            "Eurosym",
            "Fair",
            "FBM",
            "FDK-AAC",
            "Ferguson-Twofish",
            "Frameworx-1.0",
            "FreeBSD-DOC",
            "FreeImage",
            "FSFAP",
            "FSFAP-no-warranty-disclaimer",
            "FSFUL",
            "FSFULLR",
            "FSFULLRWD",
            "FTL",
            "Furuseth",
            "fwlw",
            "GCR-docs",
            "GD",
            "GFDL-1.1-invariants-only",
            "GFDL-1.1-invariants-or-later",
            "GFDL-1.1-no-invariants-only",
            "GFDL-1.1-no-invariants-or-later",
            "GFDL-1.1-only",
            "GFDL-1.1-or-later",
            "GFDL-1.2-invariants-only",
            "GFDL-1.2-invariants-or-later",
            "GFDL-1.2-no-invariants-only",
            "GFDL-1.2-no-invariants-or-later",
            "GFDL-1.2-only",
            "GFDL-1.2-or-later",
            "GFDL-1.3-invariants-only",
            "GFDL-1.3-invariants-or-later",
            "GFDL-1.3-no-invariants-only",
            "GFDL-1.3-no-invariants-or-later",
            "GFDL-1.3-only",
            "GFDL-1.3-or-later",
            "Giftware",
            "GL2PS",
            "Glide",
            "Glulxe",
            "GLWTPL",
            "gnuplot",
            "GPL-1.0-only",
            "GPL-1.0-or-later",
            "GPL-2.0-only",
            "GPL-2.0-or-later",
            "GPL-3.0-only",
            "GPL-3.0-or-later",
            "Graphics-Gems",
            "gSOAP-1.3b",
            "gtkbook",
            "HaskellReport",
            "hdparm",
            "Hippocratic-2.1",
            "HP-1986",
            "HP-1989",
            "HPND",
            "HPND-DEC",
            "HPND-doc",
            "HPND-doc-sell",
            "HPND-export-US",
            "HPND-export-US-modify",
            "HPND-Fenneberg-Livingston",
            "HPND-INRIA-IMAG",
            "HPND-Kevlin-Henney",
            "HPND-Markus-Kuhn",
            "HPND-MIT-disclaimer",
            "HPND-Pbmplus",
            "HPND-sell-MIT-disclaimer-xserver",
            "HPND-sell-regexpr",
            "HPND-sell-variant",
            "HPND-sell-variant-MIT-disclaimer",
            "HPND-UC",
            "HTMLTIDY",
            "IBM-pibs",
            "ICU",
            "IEC-Code-Components-EULA",
            "IJG",
            "IJG-short",
            "ImageMagick",
            "iMatix",
            "Imlib2",
            "Info-ZIP",
            "Inner-Net-2.0",
            "Intel",
            "Intel-ACPI",
            "Interbase-1.0",
            "IPA",
            "IPL-1.0",
            "ISC",
            "ISC-Veillard",
            "Jam",
            "JasPer-2.0",
            "JPL-image",
            "JPNIC",
            "JSON",
            "Kastrup",
            "Kazlib",
            "Knuth-CTAN",
            "LAL-1.2",
            "LAL-1.3",
            "Latex2e",
            "Latex2e-translated-notice",
            "Leptonica",
            "LGPL-2.0-only",
            "LGPL-2.0-or-later",
            "LGPL-2.1-only",
            "LGPL-2.1-or-later",
            "LGPL-3.0-only",
            "LGPL-3.0-or-later",
            "LGPLLR",
            "Libpng",
            "libpng-2.0",
            "libselinux-1.0",
            "libtiff",
            "libutil-David-Nugent",
            "LiLiQ-P-1.1",
            "LiLiQ-R-1.1",
            "LiLiQ-Rplus-1.1",
            "Linux-man-pages-1-para",
            "Linux-man-pages-copyleft",
            "Linux-man-pages-copyleft-2-para",
            "Linux-man-pages-copyleft-var",
            "Linux-OpenIB",
            "LOOP",
            "LPD-document",
            "LPL-1.0",
            "LPL-1.02",
            "LPPL-1.0",
            "LPPL-1.1",
            "LPPL-1.2",
            "LPPL-1.3a",
            "LPPL-1.3c",
            "lsof",
            "Lucida-Bitmap-Fonts",
            "LZMA-SDK-9.11-to-9.20",
            "LZMA-SDK-9.22",
            "Mackerras-3-Clause",
            "Mackerras-3-Clause-acknowledgment",
            "magaz",
            "mailprio",
            "MakeIndex",
            "Martin-Birgmeier",
            "McPhee-slideshow",
            "metamail",
            "Minpack",
            "MirOS",
            "MIT",
            "MIT-0",
            "MIT-advertising",
            "MIT-CMU",
            "MIT-enna",
            "MIT-feh",
            "MIT-Festival",
            "MIT-Modern-Variant",
            "MIT-open-group",
            "MIT-testregex",
            "MIT-Wu",
            "MITNFA",
            "MMIXware",
            "Motosoto",
            "MPEG-SSG",
            "mpi-permissive",
            "mpich2",
            "MPL-1.0",
            "MPL-1.1",
            "MPL-2.0",
            "MPL-2.0-no-copyleft-exception",
            "mplus",
            "MS-LPL",
            "MS-PL",
            "MS-RL",
            "MTLL",
            "MulanPSL-1.0",
            "MulanPSL-2.0",
            "Multics",
            "Mup",
            "NAIST-2003",
            "NASA-1.3",
            "Naumen",
            "NBPL-1.0",
            "NCGL-UK-2.0",
            "NCSA",
            "Net-SNMP",
            "NetCDF",
            "Newsletr",
            "NGPL",
            "NICTA-1.0",
            "NIST-PD",
            "NIST-PD-fallback",
            "NIST-Software",
            "NLOD-1.0",
            "NLOD-2.0",
            "NLPL",
            "Nokia",
            "NOSL",
            "Noweb",
            "NPL-1.0",
            "NPL-1.1",
            "NPOSL-3.0",
            "NRL",
            "NTP",
            "NTP-0",
            "O-UDA-1.0",
            "OCCT-PL",
            "OCLC-2.0",
            "ODbL-1.0",
            "ODC-By-1.0",
            "OFFIS",
            "OFL-1.0",
            "OFL-1.0-no-RFN",
            "OFL-1.0-RFN",
            "OFL-1.1",
            "OFL-1.1-no-RFN",
            "OFL-1.1-RFN",
            "OGC-1.0",
            "OGDL-Taiwan-1.0",
            "OGL-Canada-2.0",
            "OGL-UK-1.0",
            "OGL-UK-2.0",
            "OGL-UK-3.0",
            "OGTSL",
            "OLDAP-1.1",
            "OLDAP-1.2",
            "OLDAP-1.3",
            "OLDAP-1.4",
            "OLDAP-2.0",
            "OLDAP-2.0.1",
            "OLDAP-2.1",
            "OLDAP-2.2",
            "OLDAP-2.2.1",
            "OLDAP-2.2.2",
            "OLDAP-2.3",
            "OLDAP-2.4",
            "OLDAP-2.5",
            "OLDAP-2.6",
            "OLDAP-2.7",
            "OLDAP-2.8",
            "OLFL-1.3",
            "OML",
            "OpenPBS-2.3",
            "OpenSSL",
            "OpenSSL-standalone",
            "OpenVision",
            "OPL-1.0",
            "OPL-UK-3.0",
            "OPUBL-1.0",
            "OSET-PL-2.1",
            "OSL-1.0",
            "OSL-1.1",
            "OSL-2.0",
            "OSL-2.1",
            "OSL-3.0",
            "PADL",
            "Parity-6.0.0",
            "Parity-7.0.0",
            "PDDL-1.0",
            "PHP-3.0",
            "PHP-3.01",
            "Pixar",
            "Plexus",
            "pnmstitch",
            "PolyForm-Noncommercial-1.0.0",
            "PolyForm-Small-Business-1.0.0",
            "PostgreSQL",
            "PSF-2.0",
            "psfrag",
            "psutils",
            "Python-2.0",
            "Python-2.0.1",
            "python-ldap",
            "Qhull",
            "QPL-1.0",
            "QPL-1.0-INRIA-2004",
            "radvd",
            "Rdisc",
            "RHeCos-1.1",
            "RPL-1.1",
            "RPL-1.5",
            "RPSL-1.0",
            "RSA-MD",
            "RSCPL",
            "Ruby",
            "SAX-PD",
            "SAX-PD-2.0",
            "Saxpath",
            "SCEA",
            "SchemeReport",
            "Sendmail",
            "Sendmail-8.23",
            "SGI-B-1.0",
            "SGI-B-1.1",
            "SGI-B-2.0",
            "SGI-OpenGL",
            "SGP4",
            "SHL-0.5",
            "SHL-0.51",
            "SimPL-2.0",
            "SISSL",
            "SISSL-1.2",
            "SL",
            "Sleepycat",
            "SMLNJ",
            "SMPPL",
            "SNIA",
            "snprintf",
            "softSurfer",
            "Soundex",
            "Spencer-86",
            "Spencer-94",
            "Spencer-99",
            "SPL-1.0",
            "ssh-keyscan",
            "SSH-OpenSSH",
            "SSH-short",
            "SSLeay-standalone",
            "SSPL-1.0",
            "SugarCRM-1.1.3",
            "Sun-PPP",
            "SunPro",
            "SWL",
            "swrule",
            "Symlinks",
            "TAPR-OHL-1.0",
            "TCL",
            "TCP-wrappers",
            "TermReadKey",
            "TGPPL-1.0",
            "TMate",
            "TORQUE-1.1",
            "TOSL",
            "TPDL",
            "TPL-1.0",
            "TTWL",
            "TTYP0",
            "TU-Berlin-1.0",
            "TU-Berlin-2.0",
            "UCAR",
            "UCL-1.0",
            "ulem",
            "UMich-Merit",
            "Unicode-3.0",
            "Unicode-DFS-2015",
            "Unicode-DFS-2016",
            "Unicode-TOU",
            "UnixCrypt",
            "Unlicense",
            "UPL-1.0",
            "URT-RLE",
            "Vim",
            "VOSTROM",
            "VSL-1.0",
            "W3C",
            "W3C-19980720",
            "W3C-20150513",
            "w3m",
            "Watcom-1.0",
            "Widget-Workshop",
            "Wsuipa",
            "WTFPL",
            "X11",
            "X11-distribute-modifications-variant",
            "Xdebug-1.03",
            "Xerox",
            "Xfig",
            "XFree86-1.1",
            "xinetd",
            "xkeyboard-config-Zinoviev",
            "xlock",
            "Xnet",
            "xpp",
            "XSkat",
            "YPL-1.0",
            "YPL-1.1",
            "Zed",
            "Zeeff",
            "Zend-2.0",
            "Zimbra-1.3",
            "Zimbra-1.4",
            "Zlib",
            "zlib-acknowledgement",
            "ZPL-1.1",
            "ZPL-2.0",
            "ZPL-2.1"
          ],
          "title": "LicenseId",
          "type": "string"
        },
        {
          "enum": [
            "AGPL-1.0",
            "AGPL-3.0",
            "BSD-2-Clause-FreeBSD",
            "BSD-2-Clause-NetBSD",
            "bzip2-1.0.5",
            "eCos-2.0",
            "GFDL-1.1",
            "GFDL-1.2",
            "GFDL-1.3",
            "GPL-1.0",
            "GPL-1.0+",
            "GPL-2.0",
            "GPL-2.0+",
            "GPL-2.0-with-autoconf-exception",
            "GPL-2.0-with-bison-exception",
            "GPL-2.0-with-classpath-exception",
            "GPL-2.0-with-font-exception",
            "GPL-2.0-with-GCC-exception",
            "GPL-3.0",
            "GPL-3.0+",
            "GPL-3.0-with-autoconf-exception",
            "GPL-3.0-with-GCC-exception",
            "LGPL-2.0",
            "LGPL-2.0+",
            "LGPL-2.1",
            "LGPL-2.1+",
            "LGPL-3.0",
            "LGPL-3.0+",
            "Nunit",
            "StandardML-NJ",
            "wxWindows"
          ],
          "title": "DeprecatedLicenseId",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A [SPDX license identifier](https://spdx.org/licenses/).\nWe do not support custom license beyond the SPDX license list, if you need that please\n[open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose)\nto discuss your intentions with the community.",
      "examples": [
        "CC0-1.0",
        "MIT",
        "BSD-2-Clause"
      ],
      "title": "License"
    },
    "git_repo": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A URL to the Git repository where the resource is being developed.",
      "examples": [
        "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
      ],
      "title": "Git Repo"
    },
    "icon": {
      "anyOf": [
        {
          "maxLength": 2,
          "minLength": 1,
          "type": "string"
        },
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "An icon for illustration, e.g. on bioimage.io",
      "title": "Icon"
    },
    "links": {
      "description": "IDs of other bioimage.io resources",
      "examples": [
        [
          "ilastik/ilastik",
          "deepimagej/deepimagej",
          "zero/notebook_u-net_3d_zerocostdl4mic"
        ]
      ],
      "items": {
        "type": "string"
      },
      "title": "Links",
      "type": "array"
    },
    "uploader": {
      "anyOf": [
        {
          "$ref": "#/$defs/Uploader"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The person who uploaded the model (e.g. to bioimage.io)"
    },
    "maintainers": {
      "description": "Maintainers of this resource.\nIf not specified, `authors` are maintainers and at least some of them has to specify their `github_user` name",
      "items": {
        "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Maintainer"
      },
      "title": "Maintainers",
      "type": "array"
    },
    "tags": {
      "description": "Associated tags",
      "examples": [
        [
          "unet2d",
          "pytorch",
          "nucleus",
          "segmentation",
          "dsb2018"
        ]
      ],
      "items": {
        "type": "string"
      },
      "title": "Tags",
      "type": "array"
    },
    "version": {
      "anyOf": [
        {
          "$ref": "#/$defs/Version"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The version of the resource following SemVer 2.0."
    },
    "version_comment": {
      "anyOf": [
        {
          "maxLength": 512,
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A comment on the version of the resource.",
      "title": "Version Comment"
    },
    "format_version": {
      "const": "0.5.6",
      "description": "Version of the bioimage.io model description specification used.\nWhen creating a new model always use the latest micro/patch version described here.\nThe `format_version` is important for any consumer software to understand how to parse the fields.",
      "title": "Format Version",
      "type": "string"
    },
    "type": {
      "const": "model",
      "description": "Specialized resource type 'model'",
      "title": "Type",
      "type": "string"
    },
    "id": {
      "anyOf": [
        {
          "minLength": 1,
          "title": "ModelId",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "bioimage.io-wide unique resource identifier\nassigned by bioimage.io; version **un**specific.",
      "title": "Id"
    },
    "documentation": {
      "anyOf": [
        {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "examples": [
            "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
            "README.md"
          ]
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "URL or relative path to a markdown file with additional documentation.\nThe recommended documentation file name is `README.md`. An `.md` suffix is mandatory.\nThe documentation should include a '#[#] Validation' (sub)section\nwith details on how to quantitatively validate the model on unseen data.",
      "title": "Documentation"
    },
    "inputs": {
      "description": "Describes the input tensors expected by this model.",
      "items": {
        "$ref": "#/$defs/InputTensorDescr"
      },
      "minItems": 1,
      "title": "Inputs",
      "type": "array"
    },
    "outputs": {
      "description": "Describes the output tensors.",
      "items": {
        "$ref": "#/$defs/OutputTensorDescr"
      },
      "minItems": 1,
      "title": "Outputs",
      "type": "array"
    },
    "packaged_by": {
      "description": "The persons that have packaged and uploaded this model.\nOnly required if those persons differ from the `authors`.",
      "items": {
        "$ref": "#/$defs/bioimageio__spec__generic__v0_3__Author"
      },
      "title": "Packaged By",
      "type": "array"
    },
    "parent": {
      "anyOf": [
        {
          "$ref": "#/$defs/LinkedModel"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The model from which this model is derived, e.g. by fine-tuning the weights."
    },
    "run_mode": {
      "anyOf": [
        {
          "$ref": "#/$defs/RunMode"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Custom run mode for this model: for more complex prediction procedures like test time\ndata augmentation that currently cannot be expressed in the specification.\nNo standard run modes are defined yet."
    },
    "timestamp": {
      "$ref": "#/$defs/Datetime",
      "description": "Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format\nwith a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).\n(In Python a datetime object is valid, too)."
    },
    "training_data": {
      "anyOf": [
        {
          "$ref": "#/$defs/LinkedDataset"
        },
        {
          "$ref": "#/$defs/bioimageio__spec__dataset__v0_3__DatasetDescr"
        },
        {
          "$ref": "#/$defs/bioimageio__spec__dataset__v0_2__DatasetDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The dataset used to train this model",
      "title": "Training Data"
    },
    "weights": {
      "$ref": "#/$defs/WeightsDescr",
      "description": "The weights for this model.\nWeights can be given for different formats, but should otherwise be equivalent.\nThe available weight formats determine which consumers can use this model."
    },
    "config": {
      "$ref": "#/$defs/bioimageio__spec__model__v0_5__Config"
    }
  },
  "required": [
    "name",
    "format_version",
    "type",
    "inputs",
    "outputs",
    "weights"
  ],
  "title": "model 0.5.6",
  "type": "object"
}

Fields:

Validators:

  • _validate_documentation → documentation
  • _validate_input_axes → inputs
  • _validate_test_tensors
  • _validate_tensor_references_in_proc_kwargs
  • _validate_tensor_ids → outputs
  • _validate_output_axes → outputs
  • _validate_parent_is_not_self
  • _add_default_cover
  • _convert

attachments pydantic-field ¤

attachments: List[FileDescr_]

file attachments

authors pydantic-field ¤

authors: FAIR[List[Author]]

The authors are the creators of the model RDF and the primary points of contact.

cite pydantic-field ¤

cite: FAIR[List[CiteEntry]]

citations

config pydantic-field ¤

config: Config

covers pydantic-field ¤

covers: List[FileSource_cover]

Cover images.

description pydantic-field ¤

description: FAIR[
    Annotated[
        str,
        MaxLen(1024),
        warn(
            MaxLen(512),
            "Description longer than 512 characters.",
        ),
    ]
] = ""

A string containing a brief description.

documentation pydantic-field ¤

documentation: FAIR[Optional[FileSource_documentation]] = (
    None
)

URL or relative path to a markdown file with additional documentation. The recommended documentation file name is README.md. An .md suffix is mandatory. The documentation should include a '#[#] Validation' (sub)section with details on how to quantitatively validate the model on unseen data.

file_name property ¤

file_name: Optional[FileName]

File name of the bioimageio.yaml file the description was loaded from.

format_version pydantic-field ¤

format_version: Literal['0.5.6'] = '0.5.6'

git_repo pydantic-field ¤

git_repo: Annotated[
    Optional[HttpUrl],
    Field(
        examples=[
            "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
        ]
    ),
] = None

A URL to the Git repository where the resource is being developed.

icon pydantic-field ¤

icon: Union[
    Annotated[str, Len(min_length=1, max_length=2)],
    FileSource_,
    None,
] = None

An icon for illustration, e.g. on bioimage.io

id pydantic-field ¤

id: Optional[ModelId] = None

bioimage.io-wide unique resource identifier assigned by bioimage.io; version unspecific.

id_emoji pydantic-field ¤

id_emoji: Optional[
    Annotated[
        str,
        Len(min_length=1, max_length=2),
        Field(examples=["🦈", "🦥"]),
    ]
] = None

UTF-8 emoji for display alongside the id.

implemented_format_version class-attribute ¤

implemented_format_version: Literal['0.5.6'] = '0.5.6'

implemented_format_version_tuple class-attribute ¤

implemented_format_version_tuple: Tuple[int, int, int]

implemented_type class-attribute ¤

implemented_type: Literal['model'] = 'model'

inputs pydantic-field ¤

inputs: NotEmpty[Sequence[InputTensorDescr]]

Describes the input tensors expected by this model.

license pydantic-field ¤

license: FAIR[
    Annotated[
        Annotated[
            Union[LicenseId, DeprecatedLicenseId, None],
            Field(union_mode="left_to_right"),
        ],
        warn(
            Optional[LicenseId],
            "{value} is deprecated, see https://spdx.org/licenses/{value}.html",
        ),
        Field(examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    ]
] = None

A SPDX license identifier. We do not support custom license beyond the SPDX license list, if you need that please open a GitHub issue to discuss your intentions with the community.

links pydantic-field ¤

links: Annotated[
    List[str],
    Field(
        examples=[
            (
                "ilastik/ilastik",
                "deepimagej/deepimagej",
                "zero/notebook_u-net_3d_zerocostdl4mic",
            )
        ]
    ),
]

IDs of other bioimage.io resources

maintainers pydantic-field ¤

maintainers: List[Maintainer]

Maintainers of this resource. If not specified, authors are maintainers and at least some of them has to specify their github_user name

name pydantic-field ¤

name: Annotated[
    str,
    RestrictCharacters(
        string.ascii_letters + string.digits + "_+- ()"
    ),
    MinLen(5),
    MaxLen(128),
    warn(
        MaxLen(64), "Name longer than 64 characters.", INFO
    ),
]

A human-readable name of this model. It should be no longer than 64 characters and may only contain letter, number, underscore, minus, parentheses and spaces. We recommend choosing a name that refers to the model's task and image modality.

outputs pydantic-field ¤

outputs: NotEmpty[Sequence[OutputTensorDescr]]

Describes the output tensors.

packaged_by pydantic-field ¤

packaged_by: List[Author]

The persons that have packaged and uploaded this model. Only required if those persons differ from the authors.

parent pydantic-field ¤

parent: Optional[LinkedModel] = None

The model from which this model is derived, e.g. by fine-tuning the weights.

root property ¤

root: Union[RootHttpUrl, DirectoryPath, ZipFile]

The URL/Path prefix to resolve any relative paths with.

run_mode pydantic-field ¤

run_mode: Annotated[
    Optional[RunMode],
    warn(
        None,
        "Run mode '{value}' has limited support across consumer softwares.",
    ),
] = None

Custom run mode for this model: for more complex prediction procedures like test time data augmentation that currently cannot be expressed in the specification. No standard run modes are defined yet.

tags pydantic-field ¤

tags: FAIR[
    Annotated[
        List[str],
        Field(
            examples=[
                (
                    "unet2d",
                    "pytorch",
                    "nucleus",
                    "segmentation",
                    "dsb2018",
                )
            ]
        ),
    ]
]

Associated tags

timestamp pydantic-field ¤

timestamp: Datetime

Timestamp in ISO 8601 format with a few restrictions listed here. (In Python a datetime object is valid, too).

training_data pydantic-field ¤

training_data: Annotated[
    Union[
        None, LinkedDataset, DatasetDescr, DatasetDescr02
    ],
    Field(union_mode="left_to_right"),
] = None

The dataset used to train this model

type pydantic-field ¤

type: Literal['model'] = 'model'

uploader pydantic-field ¤

uploader: Optional[Uploader] = None

The person who uploaded the model (e.g. to bioimage.io)

validation_summary property ¤

validation_summary: ValidationSummary

version pydantic-field ¤

version: Optional[Version] = None

The version of the resource following SemVer 2.0.

version_comment pydantic-field ¤

version_comment: Optional[Annotated[str, MaxLen(512)]] = (
    None
)

A comment on the version of the resource.

weights pydantic-field ¤

weights: WeightsDescr

The weights for this model. Weights can be given for different formats, but should otherwise be equivalent. The available weight formats determine which consumers can use this model.

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any)
Source code in src/bioimageio/spec/_internal/common_nodes.py
199
200
201
202
203
204
205
206
207
208
209
210
211
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any):
    """Run pydantic's subclass hook, then cache the implemented format
    version of the new subclass as a tuple in the class variable
    `implemented_format_version_tuple`.
    """
    super().__pydantic_init_subclass__(**kwargs)
    if "format_version" not in cls.model_fields:
        return

    version_str = cls.implemented_format_version
    if "." in version_str:
        parsed = get_format_version_tuple(version_str)
        assert parsed is not None, f"failed to cast '{version_str}' to tuple"
        cls.implemented_format_version_tuple = parsed
    else:
        # no dotted version available; fall back to an all-zero tuple
        cls.implemented_format_version_tuple = (0, 0, 0)

convert_from_old_format_wo_validation classmethod ¤

convert_from_old_format_wo_validation(
    data: Dict[str, Any],
) -> None

Convert metadata following an older format version to this classes' format without validating the result.

Source code in src/bioimageio/spec/model/v0_5.py
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
@classmethod
def convert_from_old_format_wo_validation(cls, data: Dict[str, Any]) -> None:
    """Convert metadata following an older format version to this classes' format
    without validating the result.

    Mutates `data` in place; returns nothing.
    """
    # only act on model descriptions with a full "major.minor.patch" version string
    if (
        data.get("type") == "model"
        and isinstance(fv := data.get("format_version"), str)
        and fv.count(".") == 2
    ):
        fv_parts = fv.split(".")
        if any(not p.isdigit() for p in fv_parts):
            # not a purely numeric version; leave `data` untouched
            return

        fv_tuple = tuple(map(int, fv_parts))

        # this conversion path targets format 0.5.x only
        assert cls.implemented_format_version_tuple[0:2] == (0, 5)
        if fv_tuple[:2] in ((0, 3), (0, 4)):
            # parse with the model 0.4 description class first, then convert to 0.5
            m04 = _ModelDescr_v0_4.load(data)
            if isinstance(m04, InvalidDescr):
                try:
                    updated = _model_conv.convert_as_dict(
                        m04  # pyright: ignore[reportArgumentType]
                    )
                except Exception as e:
                    # best effort: keep the original data if conversion fails
                    logger.error(
                        "Failed to convert from invalid model 0.4 description."
                        + f"\nerror: {e}"
                        + "\nProceeding with model 0.5 validation without conversion."
                    )
                    updated = None
            else:
                updated = _model_conv.convert_as_dict(m04)

            if updated is not None:
                # replace the content of `data` in place with the converted description
                data.clear()
                data.update(updated)

        elif fv_tuple[:2] == (0, 5):
            # bump patch version
            data["format_version"] = cls.implemented_format_version

get_axis_sizes ¤

get_axis_sizes(
    ns: Mapping[
        Tuple[TensorId, AxisId], ParameterizedSize_N
    ],
    batch_size: Optional[int] = None,
    *,
    max_input_shape: Optional[
        Mapping[Tuple[TensorId, AxisId], int]
    ] = None,
) -> _AxisSizes

Determine input and output block shape for scale factors ns of parameterized input sizes.

Parameters:

  • ns ¤
    (Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N]) –

    Scale factor n for each axis (keyed by (tensor_id, axis_id)) that is parameterized as size = min + n * step.

  • batch_size ¤
    (Optional[int], default: None ) –

    The desired size of the batch dimension. If given batch_size overwrites any batch size present in max_input_shape. Default 1.

  • max_input_shape ¤
    (Optional[Mapping[Tuple[TensorId, AxisId], int]], default: None ) –

    Limits the derived block shapes. Each axis for which the input size, parameterized by n, is larger than max_input_shape is set to the minimal value n_min for which this is still true. Use this for small input samples or large values of ns. Or simply whenever you know the full input shape.

Returns:

  • _AxisSizes

    Resolved axis sizes for model inputs and outputs.

Source code in src/bioimageio/spec/model/v0_5.py
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
def get_axis_sizes(
    self,
    ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    batch_size: Optional[int] = None,
    *,
    max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
) -> _AxisSizes:
    """Determine input and output block shape for scale factors **ns**
    of parameterized input sizes.

    Args:
        ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
            that is parameterized as `size = min + n * step`.
        batch_size: The desired size of the batch dimension.
            If given **batch_size** overwrites any batch size present in
            **max_input_shape**. Default 1.
        max_input_shape: Limits the derived block shapes.
            Each axis for which the input size, parameterized by `n`, is larger
            than **max_input_shape** is set to the minimal value `n_min` for which
            this is still true.
            Use this for small input samples or large values of **ns**.
            Or simply whenever you know the full input shape.

    Returns:
        Resolved axis sizes for model inputs and outputs.
    """
    max_input_shape = max_input_shape or {}
    if batch_size is None:
        # fall back to a batch size given in `max_input_shape`, or default to 1
        for (_t_id, a_id), s in max_input_shape.items():
            if a_id == BATCH_AXIS_ID:
                batch_size = s
                break
        else:
            batch_size = 1

    # axis descriptions of all input and output tensors, keyed by tensor and axis id
    all_axes = {
        t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    }

    inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}

    def get_axis_size(a: Union[InputAxis, OutputAxis]):
        # resolve a single axis size; reads `t_descr` from the enclosing loops below
        if isinstance(a, BatchAxis):
            if (t_descr.id, a.id) in ns:
                logger.warning(
                    "Ignoring unexpected size increment factor (n) for batch axis"
                    + " of tensor '{}'.",
                    t_descr.id,
                )
            return batch_size
        elif isinstance(a.size, int):
            if (t_descr.id, a.id) in ns:
                logger.warning(
                    "Ignoring unexpected size increment factor (n) for fixed size"
                    + " axis '{}' of tensor '{}'.",
                    a.id,
                    t_descr.id,
                )
            return a.size
        elif isinstance(a.size, ParameterizedSize):
            if (t_descr.id, a.id) not in ns:
                raise ValueError(
                    "Size increment factor (n) missing for parametrized axis"
                    + f" '{a.id}' of tensor '{t_descr.id}'."
                )
            n = ns[(t_descr.id, a.id)]
            s_max = max_input_shape.get((t_descr.id, a.id))
            if s_max is not None:
                # cap n such that the resulting size stays within `max_input_shape`
                n = min(n, a.size.get_n(s_max))

            return a.size.get_size(n)

        elif isinstance(a.size, SizeReference):
            if (t_descr.id, a.id) in ns:
                logger.warning(
                    "Ignoring unexpected size increment factor (n) for axis '{}'"
                    + " of tensor '{}' with size reference.",
                    a.id,
                    t_descr.id,
                )
            assert not isinstance(a, BatchAxis)
            ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
            assert not isinstance(ref_axis, BatchAxis)
            ref_key = (a.size.tensor_id, a.size.axis_id)
            # the referenced size must have been resolved before this axis
            ref_size = inputs.get(ref_key, outputs.get(ref_key))
            assert ref_size is not None, ref_key
            assert not isinstance(ref_size, _DataDepSize), ref_key
            return a.size.get_size(
                axis=a,
                ref_axis=ref_axis,
                ref_size=ref_size,
            )
        elif isinstance(a.size, DataDependentSize):
            if (t_descr.id, a.id) in ns:
                logger.warning(
                    "Ignoring unexpected increment factor (n) for data dependent"
                    + " size axis '{}' of tensor '{}'.",
                    a.id,
                    t_descr.id,
                )
            return _DataDepSize(a.size.min, a.size.max)
        else:
            assert_never(a.size)

    # first resolve all but the `SizeReference` input sizes
    for t_descr in self.inputs:
        for a in t_descr.axes:
            if not isinstance(a.size, SizeReference):
                s = get_axis_size(a)
                assert not isinstance(s, _DataDepSize)
                inputs[t_descr.id, a.id] = s

    # resolve all other input axis sizes
    for t_descr in self.inputs:
        for a in t_descr.axes:
            if isinstance(a.size, SizeReference):
                s = get_axis_size(a)
                assert not isinstance(s, _DataDepSize)
                inputs[t_descr.id, a.id] = s

    # resolve all output axis sizes
    for t_descr in self.outputs:
        for a in t_descr.axes:
            assert not isinstance(a.size, ParameterizedSize)
            s = get_axis_size(a)
            outputs[t_descr.id, a.id] = s

    return _AxisSizes(inputs=inputs, outputs=outputs)

get_batch_size staticmethod ¤

get_batch_size(
    tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]],
) -> int
Source code in src/bioimageio/spec/model/v0_5.py
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
@staticmethod
def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    """Extract a single consistent batch size from **tensor_sizes**.

    A batch size of 1 is treated as unspecified.

    Raises:
        ValueError: if two tensors specify conflicting batch sizes (other than 1).
    """
    found_size = 1
    found_in: Optional[TensorId] = None
    for tensor_id, axis_sizes in tensor_sizes.items():
        for axis_id, size in axis_sizes.items():
            if axis_id != BATCH_AXIS_ID or size in (1, found_size):
                continue

            if found_size != 1:
                assert found_in is not None
                raise ValueError(
                    f"batch size mismatch for tensors '{found_in}' ({found_size}) and '{tensor_id}' ({size})"
                )

            found_size = size
            found_in = tensor_id

    return found_size

get_input_test_arrays ¤

get_input_test_arrays() -> List[NDArray[Any]]
Source code in src/bioimageio/spec/model/v0_5.py
3057
3058
def get_input_test_arrays(self) -> List[NDArray[Any]]:
    """Return the test arrays of all input tensor descriptions."""
    input_descrs = self.inputs
    return self._get_test_arrays(input_descrs)

get_ns ¤

get_ns(
    input_sizes: Mapping[TensorId, Mapping[AxisId, int]],
)

get parameter n for each parameterized axis such that the valid input size is >= the given input size

Source code in src/bioimageio/spec/model/v0_5.py
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    """get parameter `n` for each parameterized axis
    such that the valid input size is >= the given input size"""
    axis_lookup = {
        tensor.id: {axis.id: axis for axis in tensor.axes} for tensor in self.inputs
    }
    result: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    for tensor_id, sizes in input_sizes.items():
        for axis_id, given_size in sizes.items():
            descr = axis_lookup[tensor_id][axis_id].size
            if isinstance(descr, ParameterizedSize):
                result[(tensor_id, axis_id)] = descr.get_n(given_size)
            elif descr is None or isinstance(descr, (int, SizeReference)):
                # fixed or referenced sizes have no free parameter `n`
                continue
            else:
                assert_never(descr)

    return result

get_output_tensor_sizes ¤

get_output_tensor_sizes(
    input_sizes: Mapping[TensorId, Mapping[AxisId, int]],
) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]

Returns the tensor output sizes for given input_sizes. Only if input_sizes has a valid input shape, the tensor output size is exact. Otherwise it might be larger than the actual (valid) output

Source code in src/bioimageio/spec/model/v0_5.py
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
def get_output_tensor_sizes(
    self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    """Returns the tensor output sizes for given **input_sizes**.
    Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    Otherwise it might be larger than the actual (valid) output"""
    batch = self.get_batch_size(input_sizes)
    resolved = self.get_tensor_sizes(self.get_ns(input_sizes), batch_size=batch)
    return resolved.outputs

get_output_test_arrays ¤

get_output_test_arrays() -> List[NDArray[Any]]
Source code in src/bioimageio/spec/model/v0_5.py
3060
3061
def get_output_test_arrays(self) -> List[NDArray[Any]]:
    """Return the test arrays of all output tensor descriptions."""
    output_descrs = self.outputs
    return self._get_test_arrays(output_descrs)

get_package_content ¤

get_package_content() -> Dict[
    FileName, Union[FileDescr, BioimageioYamlContent]
]

Returns package content without creating the package.

Source code in src/bioimageio/spec/_internal/common_nodes.py
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
def get_package_content(
    self,
) -> Dict[FileName, Union[FileDescr, BioimageioYamlContent]]:
    """Returns package content without creating the package."""
    # `content` is handed to `PackagingContext` as `file_sources`;
    # NOTE(review): presumably it is populated as a side effect while
    # `model_dump` runs inside the context -- confirm in PackagingContext.
    content: Dict[FileName, FileDescr] = {}
    with PackagingContext(
        bioimageio_yaml_file_name=BIOIMAGEIO_YAML,
        file_sources=content,
    ):
        rdf_content: BioimageioYamlContent = self.model_dump(
            mode="json", exclude_unset=True
        )

    # the source the RDF was loaded from is not part of the package itself
    _ = rdf_content.pop("rdf_source", None)

    # package = all collected file descriptions plus the serialized description
    return {**content, BIOIMAGEIO_YAML: rdf_content}

get_tensor_sizes ¤

get_tensor_sizes(
    ns: Mapping[
        Tuple[TensorId, AxisId], ParameterizedSize_N
    ],
    batch_size: int,
) -> _TensorSizes
Source code in src/bioimageio/spec/model/v0_5.py
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
def get_tensor_sizes(
    self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
) -> _TensorSizes:
    axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    return _TensorSizes(
        {
            t: {
                aa: axis_sizes.inputs[(tt, aa)]
                for tt, aa in axis_sizes.inputs
                if tt == t
            }
            for t in {tt for tt, _ in axis_sizes.inputs}
        },
        {
            t: {
                aa: axis_sizes.outputs[(tt, aa)]
                for tt, aa in axis_sizes.outputs
                if tt == t
            }
            for t in {tt for tt, _ in axis_sizes.outputs}
        },
    )

load classmethod ¤

load(
    data: BioimageioYamlContentView,
    context: Optional[ValidationContext] = None,
) -> Union[Self, InvalidDescr]

factory method to create a resource description object

Source code in src/bioimageio/spec/_internal/common_nodes.py
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@classmethod
def load(
    cls,
    data: BioimageioYamlContentView,
    context: Optional[ValidationContext] = None,
) -> Union[Self, InvalidDescr]:
    """factory method to create a resource description object"""
    context = context or get_validation_context()
    if context.perform_io_checks:
        file_descrs = extract_file_descrs({k: v for k, v in data.items()})
        populate_cache(file_descrs)  # TODO: add progress bar

    with context.replace(log_warnings=context.warning_level <= INFO):
        rd, errors, val_warnings = cls._load_impl(deepcopy_yaml_value(data))

    if context.warning_level > INFO:
        all_warnings_context = context.replace(
            warning_level=INFO, log_warnings=False, raise_errors=False
        )
        # raise all validation warnings by reloading
        with all_warnings_context:
            _, _, val_warnings = cls._load_impl(deepcopy_yaml_value(data))

    format_status = "failed" if errors else "passed"
    rd.validation_summary.add_detail(
        ValidationDetail(
            errors=errors,
            name=(
                "bioimageio.spec format validation"
                f" {rd.type} {cls.implemented_format_version}"
            ),
            status=format_status,
            warnings=val_warnings,
        ),
        update_status=False,  # avoid updating status from 'valid-format' to 'passed', but ...
    )
    if format_status == "failed":
        # ... update status in case of failure
        rd.validation_summary.status = "failed"

    return rd

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

package ¤

package(
    dest: Optional[
        Union[ZipFile, IO[bytes], Path, str]
    ] = None,
) -> ZipFile

package the described resource as a zip archive

Parameters:

  • dest ¤
    (Optional[Union[ZipFile, IO[bytes], Path, str]], default: None ) –

    (path/bytes stream of) destination zipfile

Source code in src/bioimageio/spec/_internal/common_nodes.py
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
def package(
    self, dest: Optional[Union[ZipFile, IO[bytes], Path, str]] = None, /
) -> ZipFile:
    """package the described resource as a zip archive

    Args:
        dest: (path/bytes stream of) destination zipfile
    """
    if dest is None:
        dest = BytesIO()

    if isinstance(dest, ZipFile):
        zip = dest
        if "r" in zip.mode:
            raise ValueError(
                f"zip file {dest} opened in '{zip.mode}' mode,"
                + " but write access is needed for packaging."
            )
    else:
        zip = ZipFile(dest, mode="w")

    if zip.filename is None:
        zip.filename = (
            str(getattr(self, "id", getattr(self, "name", "bioimageio"))) + ".zip"
        )

    content = self.get_package_content()
    write_content_to_zip(content, zip)
    return zip

warn_about_tag_categories classmethod ¤

warn_about_tag_categories(
    value: List[str], info: ValidationInfo
) -> List[str]
Source code in src/bioimageio/spec/generic/v0_3.py
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
@as_warning
@field_validator("tags")
@classmethod
def warn_about_tag_categories(
    cls, value: List[str], info: ValidationInfo
) -> List[str]:
    categories = TAG_CATEGORIES.get(info.data["type"], {})
    missing_categories: List[Dict[str, Sequence[str]]] = []
    for cat, entries in categories.items():
        if not any(e in value for e in entries):
            missing_categories.append({cat: entries})

    if missing_categories:
        raise ValueError(
            f"Missing tags from bioimage.io categories: {missing_categories}"
        )

    return value

ModelId ¤

Bases: ResourceId


              flowchart TD
              bioimageio.spec.model.v0_5.ModelId[ModelId]
              bioimageio.spec.generic.v0_3.ResourceId[ResourceId]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec.generic.v0_3.ResourceId --> bioimageio.spec.model.v0_5.ModelId
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec.generic.v0_3.ResourceId
                



              click bioimageio.spec.model.v0_5.ModelId href "" "bioimageio.spec.model.v0_5.ModelId"
              click bioimageio.spec.generic.v0_3.ResourceId href "" "bioimageio.spec.generic.v0_3.ResourceId"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[
        NotEmpty[str],
        RestrictCharacters(
            string.ascii_lowercase + string.digits + "_-/."
        ),
        annotated_types.Predicate(
            lambda s: not (
                s.startswith("/") or s.endswith("/")
            )
        ),
    ]
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated
    return self._after_validator()

NominalOrOrdinalDataDescr pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "values": {
      "anyOf": [
        {
          "items": {
            "type": "integer"
          },
          "minItems": 1,
          "type": "array"
        },
        {
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "type": "array"
        },
        {
          "items": {
            "type": "boolean"
          },
          "minItems": 1,
          "type": "array"
        },
        {
          "items": {
            "type": "string"
          },
          "minItems": 1,
          "type": "array"
        }
      ],
      "description": "A fixed set of nominal or an ascending sequence of ordinal values.\nIn this case `data.type` is required to be an unsigned integer type, e.g. 'uint8'.\nString `values` are interpreted as labels for tensor values 0, ..., N.\nNote: as YAML 1.2 does not natively support a \"set\" datatype,\nnominal values should be given as a sequence (aka list/array) as well.",
      "title": "Values"
    },
    "type": {
      "default": "uint8",
      "enum": [
        "float32",
        "float64",
        "uint8",
        "int8",
        "uint16",
        "int16",
        "uint32",
        "int32",
        "uint64",
        "int64",
        "bool"
      ],
      "examples": [
        "float32",
        "uint8",
        "uint16",
        "int64",
        "bool"
      ],
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "const": "arbitrary unit",
          "type": "string"
        },
        {
          "description": "An SI unit",
          "minLength": 1,
          "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
          "title": "SiUnit",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    }
  },
  "required": [
    "values"
  ],
  "title": "model.v0_5.NominalOrOrdinalDataDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate_values_match_type

range property ¤

range

type pydantic-field ¤

type: Annotated[
    NominalOrOrdinalDType,
    Field(
        examples=[
            "float32",
            "uint8",
            "uint16",
            "int64",
            "bool",
        ]
    ),
] = "uint8"

unit pydantic-field ¤

unit: Optional[Union[Literal["arbitrary unit"], SiUnit]] = (
    None
)

values pydantic-field ¤

values: TVs

A fixed set of nominal or an ascending sequence of ordinal values. In this case data.type is required to be an unsigned integer type, e.g. 'uint8'. String values are interpreted as labels for tensor values 0, ..., N. Note: as YAML 1.2 does not natively support a "set" datatype, nominal values should be given as a sequence (aka list/array) as well.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

OnnxWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Source of the weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    },
    "opset_version": {
      "description": "ONNX opset version",
      "minimum": 7,
      "title": "Opset Version",
      "type": "integer"
    },
    "external_data": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr",
          "examples": [
            {
              "source": "weights.onnx.data"
            }
          ]
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Source of the external ONNX data file holding the weights.\n(If present **source** holds the ONNX architecture without weights)."
    }
  },
  "required": [
    "source",
    "opset_version"
  ],
  "title": "model.v0_5.OnnxWeightsDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate
  • _validate_external_data_unique_file_name

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

external_data pydantic-field ¤

external_data: Optional[FileDescr_external_data] = None

Source of the external ONNX data file holding the weights. (If present source holds the ONNX architecture without weights).

opset_version pydantic-field ¤

opset_version: Annotated[int, Ge(7)]

ONNX opset version

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, The pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

Source of the weights file.

type class-attribute ¤

type: WeightsFormat = 'onnx'

weights_format_name class-attribute ¤

weights_format_name: str = 'ONNX'

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    # reuse a hash already computed for this source within the current
    # validation context, unless recomputation is explicitly forced
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            # presumably the reader carries a (lazily computed) hash — TODO confirm
            actual_sha = reader.sha256

        # cache the result for subsequent validations in this context
        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        # hash could not be determined; nothing to compare against
        return
    elif self.sha256 == actual_sha:
        pass  # expected hash matches the actual one
    elif self.sha256 is None or context.update_hashes:
        # adopt the computed hash when none was given or updating is requested
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

OutputTensorDescr pydantic-model ¤

Bases: TensorDescrBase[OutputAxis]

Show JSON schema:
{
  "$defs": {
    "BatchAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "batch",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "batch",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "const": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The batch size may be fixed to 1,\notherwise (the default) it may be chosen arbitrarily depending on available memory",
          "title": "Size"
        }
      },
      "required": [
        "type"
      ],
      "title": "model.v0_5.BatchAxis",
      "type": "object"
    },
    "BinarizeAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold values along `axis`",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Threshold",
          "type": "array"
        },
        "axis": {
          "description": "The `threshold` axis",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "threshold",
        "axis"
      ],
      "title": "model.v0_5.BinarizeAlongAxisKwargs",
      "type": "object"
    },
    "BinarizeDescr": {
      "additionalProperties": false,
      "description": "Binarize the tensor with a fixed threshold.\n\nValues above `BinarizeKwargs.threshold`/`BinarizeAlongAxisKwargs.threshold`\nwill be set to one, values below the threshold to zero.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: binarize\n        kwargs:\n          axis: 'channel'\n          threshold: [0.25, 0.5, 0.75]\n    ```\n- in Python:\n    >>> postprocessing = [BinarizeDescr(\n    ...   kwargs=BinarizeAlongAxisKwargs(\n    ...       axis=AxisId('channel'),\n    ...       threshold=[0.25, 0.5, 0.75],\n    ...   )\n    ... )]",
      "properties": {
        "id": {
          "const": "binarize",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/BinarizeKwargs"
            },
            {
              "$ref": "#/$defs/BinarizeAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.BinarizeDescr",
      "type": "object"
    },
    "BinarizeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `BinarizeDescr`",
      "properties": {
        "threshold": {
          "description": "The fixed threshold",
          "title": "Threshold",
          "type": "number"
        }
      },
      "required": [
        "threshold"
      ],
      "title": "model.v0_5.BinarizeKwargs",
      "type": "object"
    },
    "ChannelAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "channel",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "channel",
          "title": "Type",
          "type": "string"
        },
        "channel_names": {
          "items": {
            "minLength": 1,
            "title": "Identifier",
            "type": "string"
          },
          "minItems": 1,
          "title": "Channel Names",
          "type": "array"
        }
      },
      "required": [
        "type",
        "channel_names"
      ],
      "title": "model.v0_5.ChannelAxis",
      "type": "object"
    },
    "ClipDescr": {
      "additionalProperties": false,
      "description": "Set tensor values below min to min and above max to max.\n\nSee `ScaleRangeDescr` for examples.",
      "properties": {
        "id": {
          "const": "clip",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ClipKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ClipDescr",
      "type": "object"
    },
    "ClipKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ClipDescr`",
      "properties": {
        "min": {
          "description": "minimum value for clipping",
          "title": "Min",
          "type": "number"
        },
        "max": {
          "description": "maximum value for clipping",
          "title": "Max",
          "type": "number"
        }
      },
      "required": [
        "min",
        "max"
      ],
      "title": "model.v0_4.ClipKwargs",
      "type": "object"
    },
    "DataDependentSize": {
      "additionalProperties": false,
      "properties": {
        "min": {
          "default": 1,
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "max": {
          "anyOf": [
            {
              "exclusiveMinimum": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Max"
        }
      },
      "title": "model.v0_5.DataDependentSize",
      "type": "object"
    },
    "EnsureDtypeDescr": {
      "additionalProperties": false,
      "description": "Cast the tensor data type to `EnsureDtypeKwargs.dtype` (if not matching).\n\nThis can for example be used to ensure the inner neural network model gets a\ndifferent input tensor data type than the fully described bioimage.io model does.\n\nExamples:\n    The described bioimage.io model (incl. preprocessing) accepts any\n    float32-compatible tensor, normalizes it with percentiles and clipping and then\n    casts it to uint8, which is what the neural network in this example expects.\n    - in YAML\n        ```yaml\n        inputs:\n        - data:\n            type: float32  # described bioimage.io model is compatible with any float32 input tensor\n          preprocessing:\n          - id: scale_range\n              kwargs:\n              axes: ['y', 'x']\n              max_percentile: 99.8\n              min_percentile: 5.0\n          - id: clip\n              kwargs:\n              min: 0.0\n              max: 1.0\n          - id: ensure_dtype  # the neural network of the model requires uint8\n              kwargs:\n              dtype: uint8\n        ```\n    - in Python:\n        >>> preprocessing = [\n        ...     ScaleRangeDescr(\n        ...         kwargs=ScaleRangeKwargs(\n        ...           axes= (AxisId('y'), AxisId('x')),\n        ...           max_percentile= 99.8,\n        ...           min_percentile= 5.0,\n        ...         )\n        ...     ),\n        ...     ClipDescr(kwargs=ClipKwargs(min=0.0, max=1.0)),\n        ...     EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=\"uint8\")),\n        ... ]",
      "properties": {
        "id": {
          "const": "ensure_dtype",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/EnsureDtypeKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.EnsureDtypeDescr",
      "type": "object"
    },
    "EnsureDtypeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `EnsureDtypeDescr`",
      "properties": {
        "dtype": {
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "title": "Dtype",
          "type": "string"
        }
      },
      "required": [
        "dtype"
      ],
      "title": "model.v0_5.EnsureDtypeKwargs",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value(s) to normalize with.",
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "title": "Mean",
          "type": "array"
        },
        "std": {
          "description": "The standard deviation value(s) to normalize with.\nSize must match `mean` values.",
          "items": {
            "minimum": 1e-06,
            "type": "number"
          },
          "minItems": 1,
          "title": "Std",
          "type": "array"
        },
        "axis": {
          "description": "The axis of the mean/std values to normalize each entry along that dimension\nseparately.",
          "examples": [
            "channel",
            "index"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "required": [
        "mean",
        "std",
        "axis"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract a given mean and divide by the standard deviation.\n\nNormalize with fixed, precomputed values for\n`FixedZeroMeanUnitVarianceKwargs.mean` and `FixedZeroMeanUnitVarianceKwargs.std`\nUse `FixedZeroMeanUnitVarianceAlongAxisKwargs` for independent scaling along given\naxes.\n\nExamples:\n1. scalar value for whole tensor\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          mean: 103.5\n          std: 13.7\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceKwargs(mean=103.5, std=13.7)\n    ... )]\n\n2. independently along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: fixed_zero_mean_unit_variance\n        kwargs:\n          axis: channel\n          mean: [101.5, 102.5, 103.5]\n          std: [11.7, 12.7, 13.7]\n    ```\n    - in Python\n    >>> preprocessing = [FixedZeroMeanUnitVarianceDescr(\n    ...   kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(\n    ...     axis=AxisId(\"channel\"),\n    ...     mean=[101.5, 102.5, 103.5],\n    ...     std=[11.7, 12.7, 13.7],\n    ...   )\n    ... )]",
      "properties": {
        "id": {
          "const": "fixed_zero_mean_unit_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/FixedZeroMeanUnitVarianceKwargs"
            },
            {
              "$ref": "#/$defs/FixedZeroMeanUnitVarianceAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "FixedZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `FixedZeroMeanUnitVarianceDescr`",
      "properties": {
        "mean": {
          "description": "The mean value to normalize with.",
          "title": "Mean",
          "type": "number"
        },
        "std": {
          "description": "The standard deviation value to normalize with.",
          "minimum": 1e-06,
          "title": "Std",
          "type": "number"
        }
      },
      "required": [
        "mean",
        "std"
      ],
      "title": "model.v0_5.FixedZeroMeanUnitVarianceKwargs",
      "type": "object"
    },
    "IndexOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "index",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "index",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            },
            {
              "$ref": "#/$defs/DataDependentSize"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (`SizeReference`)\n- data dependent size using `DataDependentSize` (size is only known after model inference)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        }
      },
      "required": [
        "type",
        "size"
      ],
      "title": "model.v0_5.IndexOutputAxis",
      "type": "object"
    },
    "IntervalOrRatioDataDescr": {
      "additionalProperties": false,
      "properties": {
        "type": {
          "default": "float32",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64"
          ],
          "examples": [
            "float32",
            "float64",
            "uint8",
            "uint16"
          ],
          "title": "Type",
          "type": "string"
        },
        "range": {
          "default": [
            null,
            null
          ],
          "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\n`None` corresponds to min/max of what can be expressed by **type**.",
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            },
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            }
          ],
          "title": "Range",
          "type": "array"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            }
          ],
          "default": "arbitrary unit",
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "description": "Scale for data on an interval (or ratio) scale.",
          "title": "Scale",
          "type": "number"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Offset for data on a ratio scale.",
          "title": "Offset"
        }
      },
      "title": "model.v0_5.IntervalOrRatioDataDescr",
      "type": "object"
    },
    "NominalOrOrdinalDataDescr": {
      "additionalProperties": false,
      "properties": {
        "values": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "boolean"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "string"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "description": "A fixed set of nominal or an ascending sequence of ordinal values.\nIn this case `data.type` is required to be an unsigned integer type, e.g. 'uint8'.\nString `values` are interpreted as labels for tensor values 0, ..., N.\nNote: as YAML 1.2 does not natively support a \"set\" datatype,\nnominal values should be given as a sequence (aka list/array) as well.",
          "title": "Values"
        },
        "type": {
          "default": "uint8",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "examples": [
            "float32",
            "uint8",
            "uint16",
            "int64",
            "bool"
          ],
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        }
      },
      "required": [
        "values"
      ],
      "title": "model.v0_5.NominalOrOrdinalDataDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "ScaleLinearAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axis": {
          "description": "The axis of gain and offset values.",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "required": [
        "axis"
      ],
      "title": "model.v0_5.ScaleLinearAlongAxisKwargs",
      "type": "object"
    },
    "ScaleLinearDescr": {
      "additionalProperties": false,
      "description": "Fixed linear scaling.\n\nExamples:\n  1. Scale with scalar gain and offset\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          gain: 2.0\n          offset: 3.0\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(kwargs=ScaleLinearKwargs(gain= 2.0, offset=3.0))\n    ... ]\n\n  2. Independent scaling along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          axis: 'channel'\n          gain: [1.0, 2.0, 3.0]\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(\n    ...         kwargs=ScaleLinearAlongAxisKwargs(\n    ...             axis=AxisId(\"channel\"),\n    ...             gain=[1.0, 2.0, 3.0],\n    ...         )\n    ...     )\n    ... ]",
      "properties": {
        "id": {
          "const": "scale_linear",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "anyOf": [
            {
              "$ref": "#/$defs/ScaleLinearKwargs"
            },
            {
              "$ref": "#/$defs/ScaleLinearAlongAxisKwargs"
            }
          ],
          "title": "Kwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ScaleLinearDescr",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "gain": {
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain",
          "type": "number"
        },
        "offset": {
          "default": 0.0,
          "description": "additive term",
          "title": "Offset",
          "type": "number"
        }
      },
      "title": "model.v0_5.ScaleLinearKwargs",
      "type": "object"
    },
    "ScaleMeanVarianceDescr": {
      "additionalProperties": false,
      "description": "Scale a tensor's data distribution to match another tensor's mean/std.\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
      "properties": {
        "id": {
          "const": "scale_mean_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleMeanVarianceKwargs"
        }
      },
      "required": [
        "id",
        "kwargs"
      ],
      "title": "model.v0_5.ScaleMeanVarianceDescr",
      "type": "object"
    },
    "ScaleMeanVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleMeanVarianceDescr`",
      "properties": {
        "reference_tensor": {
          "description": "Name of tensor to match.",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability:\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "reference_tensor"
      ],
      "title": "model.v0_5.ScaleMeanVarianceKwargs",
      "type": "object"
    },
    "ScaleRangeDescr": {
      "additionalProperties": false,
      "description": "Scale with percentiles.\n\nExamples:\n1. Scale linearly to map 5th percentile to 0 and 99.8th percentile to 1.0\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...     ScaleRangeDescr(\n    ...         kwargs=ScaleRangeKwargs(\n    ...           axes= (AxisId('y'), AxisId('x')),\n    ...           max_percentile= 99.8,\n    ...           min_percentile= 5.0,\n    ...         )\n    ...     )\n    ... ]\n\n  2. Combine the above scaling with additional clipping to clip values outside the range given by the percentiles.\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n              - id: scale_range\n       - id: clip\n         kwargs:\n          min: 0.0\n          max: 1.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...   ScaleRangeDescr(\n    ...     kwargs=ScaleRangeKwargs(\n    ...       axes= (AxisId('y'), AxisId('x')),\n    ...       max_percentile= 99.8,\n    ...       min_percentile= 5.0,\n    ...     )\n    ...   ),\n    ...   ClipDescr(\n    ...     kwargs=ClipKwargs(\n    ...       min=0.0,\n    ...       max=1.0,\n    ...     )\n    ...   ),\n    ... ]",
      "properties": {
        "id": {
          "const": "scale_range",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ScaleRangeKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.ScaleRangeDescr",
      "type": "object"
    },
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] interval.\nFor other percentiles the normalized values will partially be outside the [0, 1]\ninterval. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the \"batch\" axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "min_percentile": {
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "exclusiveMaximum": 100,
          "minimum": 0,
          "title": "Min Percentile",
          "type": "number"
        },
        "max_percentile": {
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "exclusiveMinimum": 1,
          "maximum": 100,
          "title": "Max Percentile",
          "type": "number"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "maxLength": 32,
              "minLength": 1,
              "title": "TensorId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor ID to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.",
          "title": "Reference Tensor"
        }
      },
      "title": "model.v0_5.ScaleRangeKwargs",
      "type": "object"
    },
    "SigmoidDescr": {
      "additionalProperties": false,
      "description": "The logistic sigmoid function, a.k.a. expit function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: sigmoid\n    ```\n- in Python:\n    >>> postprocessing = [SigmoidDescr()]",
      "properties": {
        "id": {
          "const": "sigmoid",
          "title": "Id",
          "type": "string"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.SigmoidDescr",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    },
    "SoftmaxDescr": {
      "additionalProperties": false,
      "description": "The softmax function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: softmax\n        kwargs:\n          axis: channel\n    ```\n- in Python:\n    >>> postprocessing = [SoftmaxDescr(kwargs=SoftmaxKwargs(axis=AxisId(\"channel\")))]",
      "properties": {
        "id": {
          "const": "softmax",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/SoftmaxKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.SoftmaxDescr",
      "type": "object"
    },
    "SoftmaxKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `SoftmaxDescr`",
      "properties": {
        "axis": {
          "default": "channel",
          "description": "The axis to apply the softmax function along.\nNote:\n    Defaults to 'channel' axis\n    (which may not exist, in which case\n    a different axis id has to be specified).",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "title": "model.v0_5.SoftmaxKwargs",
      "type": "object"
    },
    "SpaceOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceOutputAxis",
      "type": "object"
    },
    "SpaceOutputAxisWithHalo": {
      "additionalProperties": false,
      "properties": {
        "halo": {
          "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
          "minimum": 1,
          "title": "Halo",
          "type": "integer"
        },
        "size": {
          "$ref": "#/$defs/SizeReference",
          "description": "reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ]
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "halo",
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceOutputAxisWithHalo",
      "type": "object"
    },
    "TimeOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeOutputAxis",
      "type": "object"
    },
    "TimeOutputAxisWithHalo": {
      "additionalProperties": false,
      "properties": {
        "halo": {
          "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
          "minimum": 1,
          "title": "Halo",
          "type": "integer"
        },
        "size": {
          "$ref": "#/$defs/SizeReference",
          "description": "reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ]
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "halo",
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeOutputAxisWithHalo",
      "type": "object"
    },
    "ZeroMeanUnitVarianceDescr": {
      "additionalProperties": false,
      "description": "Subtract mean and divide by variance.\n\nExamples:\n    Subtract tensor mean and variance\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: zero_mean_unit_variance\n    ```\n    - in Python\n    >>> preprocessing = [ZeroMeanUnitVarianceDescr()]",
      "properties": {
        "id": {
          "const": "zero_mean_unit_variance",
          "title": "Id",
          "type": "string"
        },
        "kwargs": {
          "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
        }
      },
      "required": [
        "id"
      ],
      "title": "model.v0_5.ZeroMeanUnitVarianceDescr",
      "type": "object"
    },
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize each sample independently leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "title": "model.v0_5.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "output",
      "description": "Output tensor id.\nNo duplicates are allowed across all inputs and outputs.",
      "maxLength": 32,
      "minLength": 1,
      "title": "TensorId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "free text description",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "axes": {
      "description": "tensor axes",
      "items": {
        "discriminator": {
          "mapping": {
            "batch": "#/$defs/BatchAxis",
            "channel": "#/$defs/ChannelAxis",
            "index": "#/$defs/IndexOutputAxis",
            "space": {
              "oneOf": [
                {
                  "$ref": "#/$defs/SpaceOutputAxis"
                },
                {
                  "$ref": "#/$defs/SpaceOutputAxisWithHalo"
                }
              ]
            },
            "time": {
              "oneOf": [
                {
                  "$ref": "#/$defs/TimeOutputAxis"
                },
                {
                  "$ref": "#/$defs/TimeOutputAxisWithHalo"
                }
              ]
            }
          },
          "propertyName": "type"
        },
        "oneOf": [
          {
            "$ref": "#/$defs/BatchAxis"
          },
          {
            "$ref": "#/$defs/ChannelAxis"
          },
          {
            "$ref": "#/$defs/IndexOutputAxis"
          },
          {
            "oneOf": [
              {
                "$ref": "#/$defs/TimeOutputAxis"
              },
              {
                "$ref": "#/$defs/TimeOutputAxisWithHalo"
              }
            ]
          },
          {
            "oneOf": [
              {
                "$ref": "#/$defs/SpaceOutputAxis"
              },
              {
                "$ref": "#/$defs/SpaceOutputAxisWithHalo"
              }
            ]
          }
        ]
      },
      "minItems": 1,
      "title": "Axes",
      "type": "array"
    },
    "test_tensor": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "An example tensor to use for testing.\nUsing the model with the test input tensors is expected to yield the test output tensors.\nEach test tensor has to be an ndarray in the\n[numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).\nThe file extension must be '.npy'."
    },
    "sample_tensor": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A sample tensor to illustrate a possible input/output for the model.\nThe sample image primarily serves to inform a human user about an example use case\nand is typically stored as .hdf5, .png or .tiff.\nIt has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)\n(numpy's `.npy` format is not supported).\nThe image dimensionality has to match the number of axes specified in this tensor description."
    },
    "data": {
      "anyOf": [
        {
          "$ref": "#/$defs/NominalOrOrdinalDataDescr"
        },
        {
          "$ref": "#/$defs/IntervalOrRatioDataDescr"
        },
        {
          "items": {
            "anyOf": [
              {
                "$ref": "#/$defs/NominalOrOrdinalDataDescr"
              },
              {
                "$ref": "#/$defs/IntervalOrRatioDataDescr"
              }
            ]
          },
          "minItems": 1,
          "type": "array"
        }
      ],
      "default": {
        "type": "float32",
        "range": [
          null,
          null
        ],
        "unit": "arbitrary unit",
        "scale": 1.0,
        "offset": null
      },
      "description": "Description of the tensor's data values, optionally per channel.\nIf specified per channel, the data `type` needs to match across channels.",
      "title": "Data"
    },
    "postprocessing": {
      "description": "Description of how this output should be postprocessed.\n\nnote: `postprocessing` always ends with an 'ensure_dtype' operation.\n      If not given this is added to cast to this tensor's `data.type`.",
      "items": {
        "discriminator": {
          "mapping": {
            "binarize": "#/$defs/BinarizeDescr",
            "clip": "#/$defs/ClipDescr",
            "ensure_dtype": "#/$defs/EnsureDtypeDescr",
            "fixed_zero_mean_unit_variance": "#/$defs/FixedZeroMeanUnitVarianceDescr",
            "scale_linear": "#/$defs/ScaleLinearDescr",
            "scale_mean_variance": "#/$defs/ScaleMeanVarianceDescr",
            "scale_range": "#/$defs/ScaleRangeDescr",
            "sigmoid": "#/$defs/SigmoidDescr",
            "softmax": "#/$defs/SoftmaxDescr",
            "zero_mean_unit_variance": "#/$defs/ZeroMeanUnitVarianceDescr"
          },
          "propertyName": "id"
        },
        "oneOf": [
          {
            "$ref": "#/$defs/BinarizeDescr"
          },
          {
            "$ref": "#/$defs/ClipDescr"
          },
          {
            "$ref": "#/$defs/EnsureDtypeDescr"
          },
          {
            "$ref": "#/$defs/FixedZeroMeanUnitVarianceDescr"
          },
          {
            "$ref": "#/$defs/ScaleLinearDescr"
          },
          {
            "$ref": "#/$defs/ScaleMeanVarianceDescr"
          },
          {
            "$ref": "#/$defs/ScaleRangeDescr"
          },
          {
            "$ref": "#/$defs/SigmoidDescr"
          },
          {
            "$ref": "#/$defs/SoftmaxDescr"
          },
          {
            "$ref": "#/$defs/ZeroMeanUnitVarianceDescr"
          }
        ]
      },
      "title": "Postprocessing",
      "type": "array"
    }
  },
  "required": [
    "axes"
  ],
  "title": "model.v0_5.OutputTensorDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate_axes → axes
  • _validate_sample_tensor
  • _check_data_type_across_channels → data
  • _check_data_matches_channelaxis
  • _validate_postprocessing_kwargs

axes pydantic-field ¤

axes: NotEmpty[Sequence[IO_AxisT]]

tensor axes

data pydantic-field ¤

data: Union[
    TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]
]

Description of the tensor's data values, optionally per channel. If specified per channel, the data type needs to match across channels.

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

free text description

dtype property ¤

dtype: Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
    "bool",
]

dtype as specified under data.type or data[i].type

id pydantic-field ¤

Output tensor id. No duplicates are allowed across all inputs and outputs.

postprocessing pydantic-field ¤

postprocessing: List[PostprocessingDescr]

Description of how this output should be postprocessed.

postprocessing always ends with an 'ensure_dtype' operation.

If not given this is added to cast to this tensor's data.type.

sample_tensor pydantic-field ¤

sample_tensor: FAIR[Optional[FileDescr_]] = None

A sample tensor to illustrate a possible input/output for the model. The sample image primarily serves to inform a human user about an example use case and is typically stored as .hdf5, .png or .tiff. It has to be readable by the imageio library (numpy's .npy format is not supported). The image dimensionality has to match the number of axes specified in this tensor description.

shape property ¤

shape

test_tensor pydantic-field ¤

test_tensor: FAIR[Optional[FileDescr_]] = None

An example tensor to use for testing. Using the model with the test input tensors is expected to yield the test output tensors. Each test tensor has to be an ndarray in the numpy.lib file format. The file extension must be '.npy'.

get_axis_sizes_for_array ¤

get_axis_sizes_for_array(
    array: NDArray[Any],
) -> Dict[AxisId, int]
Source code in src/bioimageio/spec/model/v0_5.py
1684
1685
1686
1687
1688
1689
1690
def get_axis_sizes_for_array(self, array: NDArray[Any]) -> Dict[AxisId, int]:
    if len(array.shape) != len(self.axes):
        raise ValueError(
            f"Dimension mismatch: array shape {array.shape} (#{len(array.shape)})"
            + f" incompatible with {len(self.axes)} axes."
        )
    return {a.id: array.shape[i] for i, a in enumerate(self.axes)}

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ParameterizedSize pydantic-model ¤

Bases: Node

Describes a range of valid tensor axis sizes as size = min + n*step.

  • min and step are given by the model description.
  • All blocksize parameters n = 0,1,2,... yield a valid size.
  • A greater blocksize parameter n = 0,1,2,... results in a greater size. This allows adjusting the axis size more generically.
Show JSON schema:
{
  "additionalProperties": false,
  "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize parameters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize parameter n = 0,1,2,... results in a greater **size**.\n  This allows adjusting the axis size more generically.",
  "properties": {
    "min": {
      "exclusiveMinimum": 0,
      "title": "Min",
      "type": "integer"
    },
    "step": {
      "exclusiveMinimum": 0,
      "title": "Step",
      "type": "integer"
    }
  },
  "required": [
    "min",
    "step"
  ],
  "title": "model.v0_5.ParameterizedSize",
  "type": "object"
}

Fields:

  • min (Annotated[int, Gt(0)])
  • step (Annotated[int, Gt(0)])

N class-attribute ¤

N: Type[int] = ParameterizedSize_N

Positive integer to parameterize this axis

min pydantic-field ¤

min: Annotated[int, Gt(0)]

step pydantic-field ¤

step: Annotated[int, Gt(0)]

get_n ¤

get_n(s: int) -> ParameterizedSize_N

return smallest n parameterizing a size greater or equal than s

Source code in src/bioimageio/spec/model/v0_5.py
327
328
329
def get_n(self, s: int) -> ParameterizedSize_N:
    """return smallest n parameterizing a size greater or equal than `s`"""
    return ceil((s - self.min) / self.step)

get_size ¤

get_size(n: ParameterizedSize_N) -> int
Source code in src/bioimageio/spec/model/v0_5.py
324
325
def get_size(self, n: ParameterizedSize_N) -> int:
    return self.min + self.step * n

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_size ¤

validate_size(size: int) -> int
Source code in src/bioimageio/spec/model/v0_5.py
313
314
315
316
317
318
319
320
321
322
def validate_size(self, size: int) -> int:
    if size < self.min:
        raise ValueError(f"size {size} < {self.min}")
    if (size - self.min) % self.step != 0:
        raise ValueError(
            f"axis of size {size} is not parameterized by `min + n*step` ="
            + f" `{self.min} + n*{self.step}`"
        )

    return size

ProcessingDescrBase pydantic-model ¤

Bases: NodeWithExplicitlySetFields, ABC

processing base class

Show JSON schema:
{
  "additionalProperties": false,
  "description": "processing base class",
  "properties": {},
  "title": "model.v0_5.ProcessingDescrBase",
  "type": "object"
}

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

PytorchStateDictWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "ArchitectureFromFileDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Architecture source file",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "callable": {
          "description": "Identifier of the callable that returns a torch.nn.Module instance.",
          "examples": [
            "MyNetworkClass",
            "get_my_model"
          ],
          "minLength": 1,
          "title": "Identifier",
          "type": "string"
        },
        "kwargs": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "key word arguments for the `callable`",
          "title": "Kwargs",
          "type": "object"
        }
      },
      "required": [
        "source",
        "callable"
      ],
      "title": "model.v0_5.ArchitectureFromFileDescr",
      "type": "object"
    },
    "ArchitectureFromLibraryDescr": {
      "additionalProperties": false,
      "properties": {
        "callable": {
          "description": "Identifier of the callable that returns a torch.nn.Module instance.",
          "examples": [
            "MyNetworkClass",
            "get_my_model"
          ],
          "minLength": 1,
          "title": "Identifier",
          "type": "string"
        },
        "kwargs": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "key word arguments for the `callable`",
          "title": "Kwargs",
          "type": "object"
        },
        "import_from": {
          "description": "Where to import the callable from, i.e. `from <import_from> import <callable>`",
          "title": "Import From",
          "type": "string"
        }
      },
      "required": [
        "callable",
        "import_from"
      ],
      "title": "model.v0_5.ArchitectureFromLibraryDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    },
    "YamlValue": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "format": "date",
          "type": "string"
        },
        {
          "format": "date-time",
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        },
        {
          "type": "string"
        },
        {
          "items": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "array"
        },
        {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "object"
        },
        {
          "type": "null"
        }
      ]
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Source of the weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    },
    "architecture": {
      "anyOf": [
        {
          "$ref": "#/$defs/ArchitectureFromFileDescr"
        },
        {
          "$ref": "#/$defs/ArchitectureFromLibraryDescr"
        }
      ],
      "title": "Architecture"
    },
    "pytorch_version": {
      "$ref": "#/$defs/Version",
      "description": "Version of the PyTorch library used.\nIf `architecture.depencencies` is specified it has to include pytorch and any version pinning has to be compatible."
    },
    "dependencies": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr",
          "examples": [
            {
              "source": "environment.yaml"
            }
          ]
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Custom depencies beyond pytorch described in a Conda environment file.\nAllows to specify custom dependencies, see conda docs:\n- [Exporting an environment file across platforms](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#exporting-an-environment-file-across-platforms)\n- [Creating an environment file manually](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-file-manually)\n\nThe conda environment file should include pytorch and any version pinning has to be compatible with\n**pytorch_version**."
    }
  },
  "required": [
    "source",
    "architecture",
    "pytorch_version"
  ],
  "title": "model.v0_5.PytorchStateDictWeightsDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate

architecture pydantic-field ¤

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

dependencies pydantic-field ¤

dependencies: Optional[FileDescr_dependencies] = None

Custom dependencies beyond pytorch described in a Conda environment file. Allows to specify custom dependencies, see conda docs: - Exporting an environment file across platforms - Creating an environment file manually

The conda environment file should include pytorch and any version pinning has to be compatible with pytorch_version.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

pytorch_version pydantic-field ¤

pytorch_version: Version

Version of the PyTorch library used. If architecture.dependencies is specified it has to include pytorch and any version pinning has to be compatible.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

Source of the weights file.

type class-attribute ¤

type: WeightsFormat = 'pytorch_state_dict'

weights_format_name class-attribute ¤

weights_format_name: str = 'Pytorch State Dict'

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

ReproducibilityTolerance pydantic-model ¤

Bases: Node

Describes what small numerical differences -- if any -- may be tolerated in the generated output when executing in different environments.

A tensor element output is considered mismatched to the test_tensor if abs(output - test_tensor) > absolute_tolerance + relative_tolerance * abs(test_tensor). (Internally we call numpy.testing.assert_allclose.)

Motivation

For testing we can request the respective deep learning frameworks to be as reproducible as possible by setting seeds and choosing deterministic algorithms, but differences in operating systems, available hardware and installed drivers may still lead to numerical differences.

Show JSON schema:
{
  "additionalProperties": true,
  "description": "Describes what small numerical differences -- if any -- may be tolerated\nin the generated output when executing in different environments.\n\nA tensor element *output* is considered mismatched to the **test_tensor** if\nabs(*output* - **test_tensor**) > **absolute_tolerance** + **relative_tolerance** * abs(**test_tensor**).\n(Internally we call [numpy.testing.assert_allclose](https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_allclose.html).)\n\nMotivation:\n    For testing we can request the respective deep learning frameworks to be as\n    reproducible as possible by setting seeds and chosing deterministic algorithms,\n    but differences in operating systems, available hardware and installed drivers\n    may still lead to numerical differences.",
  "properties": {
    "relative_tolerance": {
      "default": 0.001,
      "description": "Maximum relative tolerance of reproduced test tensor.",
      "maximum": 0.01,
      "minimum": 0,
      "title": "Relative Tolerance",
      "type": "number"
    },
    "absolute_tolerance": {
      "default": 0.0001,
      "description": "Maximum absolute tolerance of reproduced test tensor.",
      "minimum": 0,
      "title": "Absolute Tolerance",
      "type": "number"
    },
    "mismatched_elements_per_million": {
      "default": 100,
      "description": "Maximum number of mismatched elements/pixels per million to tolerate.",
      "maximum": 1000,
      "minimum": 0,
      "title": "Mismatched Elements Per Million",
      "type": "integer"
    },
    "output_ids": {
      "default": [],
      "description": "Limits the output tensor IDs these reproducibility details apply to.",
      "items": {
        "maxLength": 32,
        "minLength": 1,
        "title": "TensorId",
        "type": "string"
      },
      "title": "Output Ids",
      "type": "array"
    },
    "weights_formats": {
      "default": [],
      "description": "Limits the weights formats these details apply to.",
      "items": {
        "enum": [
          "keras_hdf5",
          "onnx",
          "pytorch_state_dict",
          "tensorflow_js",
          "tensorflow_saved_model_bundle",
          "torchscript"
        ],
        "type": "string"
      },
      "title": "Weights Formats",
      "type": "array"
    }
  },
  "title": "model.v0_5.ReproducibilityTolerance",
  "type": "object"
}

Fields:

absolute_tolerance pydantic-field ¤

absolute_tolerance: AbsoluteTolerance = 0.0001

Maximum absolute tolerance of reproduced test tensor.

mismatched_elements_per_million pydantic-field ¤

mismatched_elements_per_million: MismatchedElementsPerMillion = 100

Maximum number of mismatched elements/pixels per million to tolerate.

output_ids pydantic-field ¤

output_ids: Sequence[TensorId] = ()

Limits the output tensor IDs these reproducibility details apply to.

relative_tolerance pydantic-field ¤

relative_tolerance: RelativeTolerance = 0.001

Maximum relative tolerance of reproduced test tensor.

weights_formats pydantic-field ¤

weights_formats: Sequence[WeightsFormat] = ()

Limits the weights formats these details apply to.

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleLinearAlongAxisKwargs pydantic-model ¤

Bases: ProcessingKwargs

Key word arguments for ScaleLinearDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "Key word arguments for `ScaleLinearDescr`",
  "properties": {
    "axis": {
      "description": "The axis of gain and offset values.",
      "examples": [
        "channel"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "gain": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "type": "array"
        }
      ],
      "default": 1.0,
      "description": "multiplicative factor",
      "title": "Gain"
    },
    "offset": {
      "anyOf": [
        {
          "type": "number"
        },
        {
          "items": {
            "type": "number"
          },
          "minItems": 1,
          "type": "array"
        }
      ],
      "default": 0.0,
      "description": "additive term",
      "title": "Offset"
    }
  },
  "required": [
    "axis"
  ],
  "title": "model.v0_5.ScaleLinearAlongAxisKwargs",
  "type": "object"
}

Fields:

Validators:

  • _validate

axis pydantic-field ¤

axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]

The axis of gain and offset values.

gain pydantic-field ¤

gain: Union[float, NotEmpty[List[float]]] = 1.0

multiplicative factor

offset pydantic-field ¤

offset: Union[float, NotEmpty[List[float]]] = 0.0

additive term

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleLinearDescr pydantic-model ¤

Bases: ProcessingDescrBase

Fixed linear scaling.

Examples:

  1. Scale with scalar gain and offset

    • in YAML
      preprocessing:
        - id: scale_linear
          kwargs:
            gain: 2.0
            offset: 3.0
      
    • in Python:

      preprocessing = [ ... ScaleLinearDescr(kwargs=ScaleLinearKwargs(gain= 2.0, offset=3.0)) ... ]

  2. Independent scaling along an axis

    • in YAML
      preprocessing:
        - id: scale_linear
          kwargs:
            axis: 'channel'
            gain: [1.0, 2.0, 3.0]
      
    • in Python:

      preprocessing = [ ... ScaleLinearDescr( ... kwargs=ScaleLinearAlongAxisKwargs( ... axis=AxisId("channel"), ... gain=[1.0, 2.0, 3.0], ... ) ... ) ... ]

Show JSON schema:
{
  "$defs": {
    "ScaleLinearAlongAxisKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "axis": {
          "description": "The axis of gain and offset values.",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "gain": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "default": 0.0,
          "description": "additive term",
          "title": "Offset"
        }
      },
      "required": [
        "axis"
      ],
      "title": "model.v0_5.ScaleLinearAlongAxisKwargs",
      "type": "object"
    },
    "ScaleLinearKwargs": {
      "additionalProperties": false,
      "description": "Key word arguments for `ScaleLinearDescr`",
      "properties": {
        "gain": {
          "default": 1.0,
          "description": "multiplicative factor",
          "title": "Gain",
          "type": "number"
        },
        "offset": {
          "default": 0.0,
          "description": "additive term",
          "title": "Offset",
          "type": "number"
        }
      },
      "title": "model.v0_5.ScaleLinearKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Fixed linear scaling.\n\nExamples:\n  1. Scale with scalar gain and offset\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          gain: 2.0\n          offset: 3.0\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(kwargs=ScaleLinearKwargs(gain= 2.0, offset=3.0))\n    ... ]\n\n  2. Independent scaling along an axis\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_linear\n        kwargs:\n          axis: 'channel'\n          gain: [1.0, 2.0, 3.0]\n    ```\n    - in Python:\n    >>> preprocessing = [\n    ...     ScaleLinearDescr(\n    ...         kwargs=ScaleLinearAlongAxisKwargs(\n    ...             axis=AxisId(\"channel\"),\n    ...             gain=[1.0, 2.0, 3.0],\n    ...         )\n    ...     )\n    ... ]",
  "properties": {
    "id": {
      "const": "scale_linear",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "anyOf": [
        {
          "$ref": "#/$defs/ScaleLinearKwargs"
        },
        {
          "$ref": "#/$defs/ScaleLinearAlongAxisKwargs"
        }
      ],
      "title": "Kwargs"
    }
  },
  "required": [
    "id",
    "kwargs"
  ],
  "title": "model.v0_5.ScaleLinearDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['scale_linear'] = 'scale_linear'

implemented_id class-attribute ¤

implemented_id: Literal['scale_linear'] = 'scale_linear'

kwargs pydantic-field ¤

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleLinearKwargs pydantic-model ¤

Bases: ProcessingKwargs

Key word arguments for ScaleLinearDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "Key word arguments for `ScaleLinearDescr`",
  "properties": {
    "gain": {
      "default": 1.0,
      "description": "multiplicative factor",
      "title": "Gain",
      "type": "number"
    },
    "offset": {
      "default": 0.0,
      "description": "additive term",
      "title": "Offset",
      "type": "number"
    }
  },
  "title": "model.v0_5.ScaleLinearKwargs",
  "type": "object"
}

Fields:

Validators:

  • _validate

gain pydantic-field ¤

gain: float = 1.0

multiplicative factor

offset pydantic-field ¤

offset: float = 0.0

additive term

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleMeanVarianceDescr pydantic-model ¤

Bases: ProcessingDescrBase

Scale a tensor's data distribution to match another tensor's mean/std. out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.

Show JSON schema:
{
  "$defs": {
    "ScaleMeanVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleMeanVarianceDescr`",
      "properties": {
        "reference_tensor": {
          "description": "Name of tensor to match.",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability:\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "required": [
        "reference_tensor"
      ],
      "title": "model.v0_5.ScaleMeanVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Scale a tensor's data distribution to match another tensor's mean/std.\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
  "properties": {
    "id": {
      "const": "scale_mean_variance",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ScaleMeanVarianceKwargs"
    }
  },
  "required": [
    "id",
    "kwargs"
  ],
  "title": "model.v0_5.ScaleMeanVarianceDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['scale_mean_variance'] = 'scale_mean_variance'

implemented_id class-attribute ¤

implemented_id: Literal["scale_mean_variance"] = (
    "scale_mean_variance"
)

kwargs pydantic-field ¤

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleMeanVarianceKwargs pydantic-model ¤

Bases: ProcessingKwargs

Keyword arguments for ScaleMeanVarianceDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ScaleMeanVarianceDescr`",
  "properties": {
    "reference_tensor": {
      "description": "Name of tensor to match.",
      "maxLength": 32,
      "minLength": 1,
      "title": "TensorId",
      "type": "string"
    },
    "axes": {
      "anyOf": [
        {
          "items": {
            "maxLength": 16,
            "minLength": 1,
            "title": "AxisId",
            "type": "string"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
      "examples": [
        [
          "batch",
          "x",
          "y"
        ]
      ],
      "title": "Axes"
    },
    "eps": {
      "default": 1e-06,
      "description": "Epsilon for numeric stability:\n`out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`",
      "exclusiveMinimum": 0,
      "maximum": 0.1,
      "title": "Eps",
      "type": "number"
    }
  },
  "required": [
    "reference_tensor"
  ],
  "title": "model.v0_5.ScaleMeanVarianceKwargs",
  "type": "object"
}

Fields:

axes pydantic-field ¤

axes: Annotated[
    Optional[Sequence[AxisId]],
    Field(examples=[("batch", "x", "y")]),
] = None

The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std. For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x') resulting in a tensor of equal shape normalized per channel, specify axes=('batch', 'x', 'y'). To normalize samples independently, leave out the 'batch' axis. Default: Scale all axes jointly.

eps pydantic-field ¤

eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-06

Epsilon for numeric stability: out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.

reference_tensor pydantic-field ¤

reference_tensor: TensorId

Name of tensor to match.

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleRangeDescr pydantic-model ¤

Bases: ProcessingDescrBase

Scale with percentiles.

Examples:

  1. Scale linearly to map 5th percentile to 0 and 99.8th percentile to 1.0

    • in YAML
      preprocessing:
        - id: scale_range
          kwargs:
            axes: ['y', 'x']
            max_percentile: 99.8
            min_percentile: 5.0
      
    • in Python

      preprocessing = [ ... ScaleRangeDescr( ... kwargs=ScaleRangeKwargs( ... axes= (AxisId('y'), AxisId('x')), ... max_percentile= 99.8, ... min_percentile= 5.0, ... ) ... ) ... ]

  2. Combine the above scaling with additional clipping to clip values outside the range given by the percentiles.

    • in YAML
      preprocessing:
        - id: scale_range
          kwargs:
            axes: ['y', 'x']
            max_percentile: 99.8
            min_percentile: 5.0
        - id: clip
          kwargs:
            min: 0.0
            max: 1.0
      
    • in Python

      preprocessing = [ ... ScaleRangeDescr( ... kwargs=ScaleRangeKwargs( ... axes= (AxisId('y'), AxisId('x')), ... max_percentile= 99.8, ... min_percentile= 5.0, ... ) ... ), ... ClipDescr( ... kwargs=ClipKwargs( ... min=0.0, ... max=1.0, ... ) ... ), ... ]

Show JSON schema:
{
  "$defs": {
    "ScaleRangeKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] interval.\nFor other percentiles the normalized values will partially be outside the [0, 1]\ninterval. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the \"batch\" axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "min_percentile": {
          "default": 0.0,
          "description": "The lower percentile used to determine the value to align with zero.",
          "exclusiveMaximum": 100,
          "minimum": 0,
          "title": "Min Percentile",
          "type": "number"
        },
        "max_percentile": {
          "default": 100.0,
          "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
          "exclusiveMinimum": 1,
          "maximum": 100,
          "title": "Max Percentile",
          "type": "number"
        },
        "eps": {
          "default": 1e-06,
          "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        },
        "reference_tensor": {
          "anyOf": [
            {
              "maxLength": 32,
              "minLength": 1,
              "title": "TensorId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tensor ID to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.",
          "title": "Reference Tensor"
        }
      },
      "title": "model.v0_5.ScaleRangeKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Scale with percentiles.\n\nExamples:\n1. Scale linearly to map 5th percentile to 0 and 99.8th percentile to 1.0\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...     ScaleRangeDescr(\n    ...         kwargs=ScaleRangeKwargs(\n    ...           axes= (AxisId('y'), AxisId('x')),\n    ...           max_percentile= 99.8,\n    ...           min_percentile= 5.0,\n    ...         )\n    ...     )\n    ... ]\n\n  2. Combine the above scaling with additional clipping to clip values outside the range given by the percentiles.\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: scale_range\n        kwargs:\n          axes: ['y', 'x']\n          max_percentile: 99.8\n          min_percentile: 5.0\n              - id: scale_range\n       - id: clip\n         kwargs:\n          min: 0.0\n          max: 1.0\n    ```\n    - in Python\n    >>> preprocessing = [\n    ...   ScaleRangeDescr(\n    ...     kwargs=ScaleRangeKwargs(\n    ...       axes= (AxisId('y'), AxisId('x')),\n    ...       max_percentile= 99.8,\n    ...       min_percentile= 5.0,\n    ...     )\n    ...   ),\n    ...   ClipDescr(\n    ...     kwargs=ClipKwargs(\n    ...       min=0.0,\n    ...       max=1.0,\n    ...     )\n    ...   ),\n    ... ]",
  "properties": {
    "id": {
      "const": "scale_range",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ScaleRangeKwargs"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_5.ScaleRangeDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['scale_range'] = 'scale_range'

implemented_id class-attribute ¤

implemented_id: Literal['scale_range'] = 'scale_range'

kwargs pydantic-field ¤

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ScaleRangeKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ScaleRangeDescr

For min_percentile=0.0 (the default) and max_percentile=100 (the default) this processing step normalizes data to the [0, 1] interval. For other percentiles the normalized values will partially be outside the [0, 1] interval. Use ScaleRange followed by ClipDescr if you want to limit the normalized values to a range.

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ScaleRangeDescr`\n\nFor `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)\nthis processing step normalizes data to the [0, 1] intervall.\nFor other percentiles the normalized values will partially be outside the [0, 1]\nintervall. Use `ScaleRange` followed by `ClipDescr` if you want to limit the\nnormalized values to a range.",
  "properties": {
    "axes": {
      "anyOf": [
        {
          "items": {
            "maxLength": 16,
            "minLength": 1,
            "title": "AxisId",
            "type": "string"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize samples independently, leave out the \"batch\" axis.\nDefault: Scale all axes jointly.",
      "examples": [
        [
          "batch",
          "x",
          "y"
        ]
      ],
      "title": "Axes"
    },
    "min_percentile": {
      "default": 0.0,
      "description": "The lower percentile used to determine the value to align with zero.",
      "exclusiveMaximum": 100,
      "minimum": 0,
      "title": "Min Percentile",
      "type": "number"
    },
    "max_percentile": {
      "default": 100.0,
      "description": "The upper percentile used to determine the value to align with one.\nHas to be bigger than `min_percentile`.\nThe range is 1 to 100 instead of 0 to 100 to avoid mistakenly\naccepting percentiles specified in the range 0.0 to 1.0.",
      "exclusiveMinimum": 1,
      "maximum": 100,
      "title": "Max Percentile",
      "type": "number"
    },
    "eps": {
      "default": 1e-06,
      "description": "Epsilon for numeric stability.\n`out = (tensor - v_lower) / (v_upper - v_lower + eps)`;\nwith `v_lower,v_upper` values at the respective percentiles.",
      "exclusiveMinimum": 0,
      "maximum": 0.1,
      "title": "Eps",
      "type": "number"
    },
    "reference_tensor": {
      "anyOf": [
        {
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Tensor ID to compute the percentiles from. Default: The tensor itself.\nFor any tensor in `inputs` only input tensor references are allowed.",
      "title": "Reference Tensor"
    }
  },
  "title": "model.v0_5.ScaleRangeKwargs",
  "type": "object"
}

Fields:

Validators:

axes pydantic-field ¤

axes: Annotated[
    Optional[Sequence[AxisId]],
    Field(examples=[("batch", "x", "y")]),
] = None

The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value. For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x') resulting in a tensor of equal shape normalized per channel, specify axes=('batch', 'x', 'y'). To normalize samples independently, leave out the "batch" axis. Default: Scale all axes jointly.

eps pydantic-field ¤

eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-06

Epsilon for numeric stability. out = (tensor - v_lower) / (v_upper - v_lower + eps); with v_lower,v_upper values at the respective percentiles.

max_percentile pydantic-field ¤

max_percentile: Annotated[float, Interval(gt=1, le=100)] = (
    100.0
)

The upper percentile used to determine the value to align with one. Has to be bigger than min_percentile. The range is 1 to 100 instead of 0 to 100 to avoid mistakenly accepting percentiles specified in the range 0.0 to 1.0.

min_percentile pydantic-field ¤

min_percentile: Annotated[float, Interval(ge=0, lt=100)] = (
    0.0
)

The lower percentile used to determine the value to align with zero.

reference_tensor pydantic-field ¤

reference_tensor: Optional[TensorId] = None

Tensor ID to compute the percentiles from. Default: The tensor itself. For any tensor in inputs only input tensor references are allowed.

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

min_smaller_max pydantic-validator ¤

min_smaller_max(
    value: float, info: ValidationInfo
) -> float
Source code in src/bioimageio/spec/model/v0_5.py
1389
1390
1391
1392
1393
1394
1395
@field_validator("max_percentile", mode="after")
@classmethod
def min_smaller_max(cls, value: float, info: ValidationInfo) -> float:
    if (min_p := info.data["min_percentile"]) >= value:
        raise ValueError(f"min_percentile {min_p} >= max_percentile {value}")

    return value

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SigmoidDescr pydantic-model ¤

Bases: ProcessingDescrBase

The logistic sigmoid function, a.k.a. expit function.

Examples:

  • in YAML
    postprocessing:
      - id: sigmoid
    
  • in Python: >>> postprocessing = [SigmoidDescr()]
Show JSON schema:
{
  "additionalProperties": false,
  "description": "The logistic sigmoid function, a.k.a. expit function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: sigmoid\n    ```\n- in Python:\n    >>> postprocessing = [SigmoidDescr()]",
  "properties": {
    "id": {
      "const": "sigmoid",
      "title": "Id",
      "type": "string"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_5.SigmoidDescr",
  "type": "object"
}

Fields:

  • id (Literal['sigmoid'])

id pydantic-field ¤

id: Literal['sigmoid'] = 'sigmoid'

implemented_id class-attribute ¤

implemented_id: Literal['sigmoid'] = 'sigmoid'

kwargs property ¤

empty kwargs

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SizeReference pydantic-model ¤

Bases: Node

A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.

axis.size = reference.size * reference.scale / axis.scale + offset

Note: 1. The axis and the referenced axis need to have the same unit (or no unit). 2. Batch axes may not be referenced. 3. Fractions are rounded down. 4. If the reference axis is concatenable the referencing axis is assumed to be concatenable as well with the same block order.

Example: An anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm². Let's assume that we want to express the image height h in relation to its width w instead of only accepting input images of exactly 100*49 pixels (for example to express a range of valid image shapes by parametrizing w, see ParameterizedSize).

>>> w = SpaceInputAxis(id=AxisId("w"), size=100, unit="millimeter", scale=2)
>>> h = SpaceInputAxis(
...     id=AxisId("h"),
...     size=SizeReference(tensor_id=TensorId("input"), axis_id=AxisId("w"), offset=-1),
...     unit="millimeter",
...     scale=4,
... )
>>> print(h.size.get_size(h, w))
49

⇒ h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49

Show JSON schema:
{
  "additionalProperties": false,
  "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn unisotropic input image of w*h=100*49 pixels depicts a phsical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
  "properties": {
    "tensor_id": {
      "description": "tensor id of the reference axis",
      "maxLength": 32,
      "minLength": 1,
      "title": "TensorId",
      "type": "string"
    },
    "axis_id": {
      "description": "axis id of the reference axis",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "offset": {
      "default": 0,
      "title": "Offset",
      "type": "integer"
    }
  },
  "required": [
    "tensor_id",
    "axis_id"
  ],
  "title": "model.v0_5.SizeReference",
  "type": "object"
}

Fields:

axis_id pydantic-field ¤

axis_id: AxisId

axis id of the reference axis

offset pydantic-field ¤

offset: StrictInt = 0

tensor_id pydantic-field ¤

tensor_id: TensorId

tensor id of the reference axis

get_size ¤

Compute the concrete size for a given axis and its reference axis.

Parameters:

Source code in src/bioimageio/spec/model/v0_5.py
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
def get_size(
    self,
    axis: Union[
        ChannelAxis,
        IndexInputAxis,
        IndexOutputAxis,
        TimeInputAxis,
        SpaceInputAxis,
        TimeOutputAxis,
        TimeOutputAxisWithHalo,
        SpaceOutputAxis,
        SpaceOutputAxisWithHalo,
    ],
    ref_axis: Union[
        ChannelAxis,
        IndexInputAxis,
        IndexOutputAxis,
        TimeInputAxis,
        SpaceInputAxis,
        TimeOutputAxis,
        TimeOutputAxisWithHalo,
        SpaceOutputAxis,
        SpaceOutputAxisWithHalo,
    ],
    n: ParameterizedSize_N = 0,
    ref_size: Optional[int] = None,
):
    """Compute the concrete size for a given axis and its reference axis.

    Args:
        axis: The axis this `SizeReference` is the size of.
        ref_axis: The reference axis to compute the size from.
        n: If the **ref_axis** is parameterized (of type `ParameterizedSize`)
            and no fixed **ref_size** is given,
            **n** is used to compute the size of the parameterized **ref_axis**.
        ref_size: Overwrite the reference size instead of deriving it from
            **ref_axis**
            (**ref_axis.scale** is still used; any given **n** is ignored).
    """
    assert axis.size == self, (
        "Given `axis.size` is not defined by this `SizeReference`"
    )

    assert ref_axis.id == self.axis_id, (
        f"Expected `ref_axis.id` to be {self.axis_id}, but got {ref_axis.id}."
    )

    assert axis.unit == ref_axis.unit, (
        "`SizeReference` requires `axis` and `ref_axis` to have the same `unit`,"
        f" but {axis.unit}!={ref_axis.unit}"
    )
    if ref_size is None:
        if isinstance(ref_axis.size, (int, float)):
            ref_size = ref_axis.size
        elif isinstance(ref_axis.size, ParameterizedSize):
            ref_size = ref_axis.size.get_size(n)
        elif isinstance(ref_axis.size, DataDependentSize):
            raise ValueError(
                "Reference axis referenced in `SizeReference` may not be a `DataDependentSize`."
            )
        elif isinstance(ref_axis.size, SizeReference):
            raise ValueError(
                "Reference axis referenced in `SizeReference` may not be sized by a"
                + " `SizeReference` itself."
            )
        else:
            assert_never(ref_axis.size)

    return int(ref_size * ref_axis.scale / axis.scale + self.offset)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SoftmaxDescr pydantic-model ¤

Bases: ProcessingDescrBase

The softmax function.

Examples:

  • in YAML
    postprocessing:
      - id: softmax
        kwargs:
          axis: channel
    
  • in Python: >>> postprocessing = [SoftmaxDescr(kwargs=SoftmaxKwargs(axis=AxisId("channel")))]
Show JSON schema:
{
  "$defs": {
    "SoftmaxKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `SoftmaxDescr`",
      "properties": {
        "axis": {
          "default": "channel",
          "description": "The axis to apply the softmax function along.\nNote:\n    Defaults to 'channel' axis\n    (which may not exist, in which case\n    a different axis id has to be specified).",
          "examples": [
            "channel"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        }
      },
      "title": "model.v0_5.SoftmaxKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "The softmax function.\n\nExamples:\n- in YAML\n    ```yaml\n    postprocessing:\n      - id: softmax\n        kwargs:\n          axis: channel\n    ```\n- in Python:\n    >>> postprocessing = [SoftmaxDescr(kwargs=SoftmaxKwargs(axis=AxisId(\"channel\")))]",
  "properties": {
    "id": {
      "const": "softmax",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/SoftmaxKwargs"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_5.SoftmaxDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal['softmax'] = 'softmax'

implemented_id class-attribute ¤

implemented_id: Literal['softmax'] = 'softmax'

kwargs pydantic-field ¤

kwargs: SoftmaxKwargs

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SoftmaxKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for SoftmaxDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `SoftmaxDescr`",
  "properties": {
    "axis": {
      "default": "channel",
      "description": "The axis to apply the softmax function along.\nNote:\n    Defaults to 'channel' axis\n    (which may not exist, in which case\n    a different axis id has to be specified).",
      "examples": [
        "channel"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    }
  },
  "title": "model.v0_5.SoftmaxKwargs",
  "type": "object"
}

Fields:

axis pydantic-field ¤

axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]

The axis to apply the softmax function along. Note: Defaults to 'channel' axis (which may not exist, in which case a different axis id has to be specified).

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SpaceAxisBase pydantic-model ¤

Bases: AxisBase

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "x",
      "examples": [
        "x",
        "y",
        "z"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "space",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attometer",
            "angstrom",
            "centimeter",
            "decimeter",
            "exameter",
            "femtometer",
            "foot",
            "gigameter",
            "hectometer",
            "inch",
            "kilometer",
            "megameter",
            "meter",
            "micrometer",
            "mile",
            "millimeter",
            "nanometer",
            "parsec",
            "petameter",
            "picometer",
            "terameter",
            "yard",
            "yoctometer",
            "yottameter",
            "zeptometer",
            "zettameter"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    }
  },
  "required": [
    "type"
  ],
  "title": "model.v0_5.SpaceAxisBase",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

id: Annotated[
    NonBatchAxisId, Field(examples=["x", "y", "z"])
]

implemented_type class-attribute ¤

implemented_type: Literal['space'] = 'space'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

type pydantic-field ¤

type: Literal['space'] = 'space'

unit pydantic-field ¤

unit: Optional[SpaceUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SpaceInputAxis pydantic-model ¤

Bases: SpaceAxisBase, _WithInputAxisSize

Show JSON schema:
{
  "$defs": {
    "ParameterizedSize": {
      "additionalProperties": false,
      "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize parameters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize parameter n = 0,1,2,... results in a greater **size**.\n  This allows adjusting the axis size more generically.",
      "properties": {
        "min": {
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "step": {
          "exclusiveMinimum": 0,
          "title": "Step",
          "type": "integer"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_5.ParameterizedSize",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "size": {
      "anyOf": [
        {
          "exclusiveMinimum": 0,
          "type": "integer"
        },
        {
          "$ref": "#/$defs/ParameterizedSize"
        },
        {
          "$ref": "#/$defs/SizeReference"
        }
      ],
      "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
      "examples": [
        10,
        {
          "min": 32,
          "step": 16
        },
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ],
      "title": "Size"
    },
    "id": {
      "default": "x",
      "examples": [
        "x",
        "y",
        "z"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "space",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attometer",
            "angstrom",
            "centimeter",
            "decimeter",
            "exameter",
            "femtometer",
            "foot",
            "gigameter",
            "hectometer",
            "inch",
            "kilometer",
            "megameter",
            "meter",
            "micrometer",
            "mile",
            "millimeter",
            "nanometer",
            "parsec",
            "petameter",
            "picometer",
            "terameter",
            "yard",
            "yoctometer",
            "yottameter",
            "zeptometer",
            "zettameter"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    },
    "concatenable": {
      "default": false,
      "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
      "title": "Concatenable",
      "type": "boolean"
    }
  },
  "required": [
    "size",
    "type"
  ],
  "title": "model.v0_5.SpaceInputAxis",
  "type": "object"
}

Fields:

concatenable pydantic-field ¤

concatenable: bool = False

If a model has a concatenable input axis, it can be processed blockwise, splitting a longer sample axis into blocks matching its input tensor description. Output axes are concatenable if they have a SizeReference to a concatenable input axis.

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

id: Annotated[
    NonBatchAxisId, Field(examples=["x", "y", "z"])
]

implemented_type class-attribute ¤

implemented_type: Literal['space'] = 'space'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

size pydantic-field ¤

size: Annotated[
    Union[
        Annotated[int, Gt(0)],
        ParameterizedSize,
        SizeReference,
    ],
    Field(
        examples=[
            10,
            ParameterizedSize(min=32, step=16).model_dump(
                mode="json"
            ),
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

The size/length of this axis can be specified as - fixed integer - parameterized series of valid sizes (ParameterizedSize) - reference to another axis with an optional offset (SizeReference)

type pydantic-field ¤

type: Literal['space'] = 'space'

unit pydantic-field ¤

unit: Optional[SpaceUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SpaceOutputAxis pydantic-model ¤

Bases: SpaceAxisBase, _WithOutputAxisSize

Show JSON schema:
{
  "$defs": {
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "size": {
      "anyOf": [
        {
          "exclusiveMinimum": 0,
          "type": "integer"
        },
        {
          "$ref": "#/$defs/SizeReference"
        }
      ],
      "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
      "examples": [
        10,
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ],
      "title": "Size"
    },
    "id": {
      "default": "x",
      "examples": [
        "x",
        "y",
        "z"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "space",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attometer",
            "angstrom",
            "centimeter",
            "decimeter",
            "exameter",
            "femtometer",
            "foot",
            "gigameter",
            "hectometer",
            "inch",
            "kilometer",
            "megameter",
            "meter",
            "micrometer",
            "mile",
            "millimeter",
            "nanometer",
            "parsec",
            "petameter",
            "picometer",
            "terameter",
            "yard",
            "yoctometer",
            "yottameter",
            "zeptometer",
            "zettameter"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    }
  },
  "required": [
    "size",
    "type"
  ],
  "title": "model.v0_5.SpaceOutputAxis",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

id: Annotated[
    NonBatchAxisId, Field(examples=["x", "y", "z"])
]

implemented_type class-attribute ¤

implemented_type: Literal['space'] = 'space'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

size pydantic-field ¤

size: Annotated[
    Union[Annotated[int, Gt(0)], SizeReference],
    Field(
        examples=[
            10,
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

The size/length of this axis can be specified as - fixed integer - reference to another axis with an optional offset (see SizeReference)

type pydantic-field ¤

type: Literal['space'] = 'space'

unit pydantic-field ¤

unit: Optional[SpaceUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

SpaceOutputAxisWithHalo pydantic-model ¤

Bases: SpaceAxisBase, WithHalo

Show JSON schema:
{
  "$defs": {
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "halo": {
      "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
      "minimum": 1,
      "title": "Halo",
      "type": "integer"
    },
    "size": {
      "$ref": "#/$defs/SizeReference",
      "description": "reference to another axis with an optional offset (see `SizeReference`)",
      "examples": [
        10,
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ]
    },
    "id": {
      "default": "x",
      "examples": [
        "x",
        "y",
        "z"
      ],
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "space",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attometer",
            "angstrom",
            "centimeter",
            "decimeter",
            "exameter",
            "femtometer",
            "foot",
            "gigameter",
            "hectometer",
            "inch",
            "kilometer",
            "megameter",
            "meter",
            "micrometer",
            "mile",
            "millimeter",
            "nanometer",
            "parsec",
            "petameter",
            "picometer",
            "terameter",
            "yard",
            "yoctometer",
            "yottameter",
            "zeptometer",
            "zettameter"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    }
  },
  "required": [
    "halo",
    "size",
    "type"
  ],
  "title": "model.v0_5.SpaceOutputAxisWithHalo",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

halo pydantic-field ¤

halo: Annotated[int, Ge(1)]

The halo should be cropped from the output tensor to avoid boundary effects. It is to be cropped from both sides, i.e. size_after_crop = size - 2 * halo. To document a halo that is already cropped by the model use size.offset instead.

id pydantic-field ¤

id: Annotated[
    NonBatchAxisId, Field(examples=["x", "y", "z"])
]

implemented_type class-attribute ¤

implemented_type: Literal['space'] = 'space'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

size pydantic-field ¤

size: Annotated[
    SizeReference,
    Field(
        examples=[
            10,
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

reference to another axis with an optional offset (see SizeReference)

type pydantic-field ¤

type: Literal['space'] = 'space'

unit pydantic-field ¤

unit: Optional[SpaceUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TensorDescrBase pydantic-model ¤

Bases: Node, Generic[IO_AxisT]

Show JSON schema:
{
  "$defs": {
    "BatchAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "batch",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "batch",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "const": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The batch size may be fixed to 1,\notherwise (the default) it may be chosen arbitrarily depending on available memory",
          "title": "Size"
        }
      },
      "required": [
        "type"
      ],
      "title": "model.v0_5.BatchAxis",
      "type": "object"
    },
    "ChannelAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "channel",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "channel",
          "title": "Type",
          "type": "string"
        },
        "channel_names": {
          "items": {
            "minLength": 1,
            "title": "Identifier",
            "type": "string"
          },
          "minItems": 1,
          "title": "Channel Names",
          "type": "array"
        }
      },
      "required": [
        "type",
        "channel_names"
      ],
      "title": "model.v0_5.ChannelAxis",
      "type": "object"
    },
    "DataDependentSize": {
      "additionalProperties": false,
      "properties": {
        "min": {
          "default": 1,
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "max": {
          "anyOf": [
            {
              "exclusiveMinimum": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Max"
        }
      },
      "title": "model.v0_5.DataDependentSize",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "IndexInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "index",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "index",
          "title": "Type",
          "type": "string"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.IndexInputAxis",
      "type": "object"
    },
    "IndexOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "id": {
          "default": "index",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "index",
          "title": "Type",
          "type": "string"
        },
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            },
            {
              "$ref": "#/$defs/DataDependentSize"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (`SizeReference`)\n- data dependent size using `DataDependentSize` (size is only known after model inference)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        }
      },
      "required": [
        "type",
        "size"
      ],
      "title": "model.v0_5.IndexOutputAxis",
      "type": "object"
    },
    "IntervalOrRatioDataDescr": {
      "additionalProperties": false,
      "properties": {
        "type": {
          "default": "float32",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64"
          ],
          "examples": [
            "float32",
            "float64",
            "uint8",
            "uint16"
          ],
          "title": "Type",
          "type": "string"
        },
        "range": {
          "default": [
            null,
            null
          ],
          "description": "Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.\n`None` corresponds to min/max of what can be expressed by **type**.",
          "maxItems": 2,
          "minItems": 2,
          "prefixItems": [
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            },
            {
              "anyOf": [
                {
                  "type": "number"
                },
                {
                  "type": "null"
                }
              ]
            }
          ],
          "title": "Range",
          "type": "array"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            }
          ],
          "default": "arbitrary unit",
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "description": "Scale for data on an interval (or ratio) scale.",
          "title": "Scale",
          "type": "number"
        },
        "offset": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Offset for data on a ratio scale.",
          "title": "Offset"
        }
      },
      "title": "model.v0_5.IntervalOrRatioDataDescr",
      "type": "object"
    },
    "NominalOrOrdinalDataDescr": {
      "additionalProperties": false,
      "properties": {
        "values": {
          "anyOf": [
            {
              "items": {
                "type": "integer"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "number"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "boolean"
              },
              "minItems": 1,
              "type": "array"
            },
            {
              "items": {
                "type": "string"
              },
              "minItems": 1,
              "type": "array"
            }
          ],
          "description": "A fixed set of nominal or an ascending sequence of ordinal values.\nIn this case `data.type` is required to be an unsigend integer type, e.g. 'uint8'.\nString `values` are interpreted as labels for tensor values 0, ..., N.\nNote: as YAML 1.2 does not natively support a \"set\" datatype,\nnominal values should be given as a sequence (aka list/array) as well.",
          "title": "Values"
        },
        "type": {
          "default": "uint8",
          "enum": [
            "float32",
            "float64",
            "uint8",
            "int8",
            "uint16",
            "int16",
            "uint32",
            "int32",
            "uint64",
            "int64",
            "bool"
          ],
          "examples": [
            "float32",
            "uint8",
            "uint16",
            "int64",
            "bool"
          ],
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "const": "arbitrary unit",
              "type": "string"
            },
            {
              "description": "An SI unit",
              "minLength": 1,
              "pattern": "^(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?((\u00b7(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^[+-]?[1-9]\\d*)?)|(/(Q|R|Y|Z|E|P|T|G|M|k|h|da|d|c|m|\u00b5|n|p|f|a|z|y|r|q)?(m|g|s|A|K|mol|cd|Hz|N|Pa|J|W|C|V|F|\u03a9|S|Wb|T|H|lm|lx|Bq|Gy|Sv|kat|l|L)(\\^+?[1-9]\\d*)?))*$",
              "title": "SiUnit",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        }
      },
      "required": [
        "values"
      ],
      "title": "model.v0_5.NominalOrOrdinalDataDescr",
      "type": "object"
    },
    "ParameterizedSize": {
      "additionalProperties": false,
      "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize paramters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize paramter n = 0,1,2,... results in a greater **size**.\n  This allows to adjust the axis size more generically.",
      "properties": {
        "min": {
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "step": {
          "exclusiveMinimum": 0,
          "title": "Step",
          "type": "integer"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_5.ParameterizedSize",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn unisotropic input image of w*h=100*49 pixels depicts a phsical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    },
    "SpaceInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceInputAxis",
      "type": "object"
    },
    "SpaceOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceOutputAxis",
      "type": "object"
    },
    "SpaceOutputAxisWithHalo": {
      "additionalProperties": false,
      "properties": {
        "halo": {
          "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
          "minimum": 1,
          "title": "Halo",
          "type": "integer"
        },
        "size": {
          "$ref": "#/$defs/SizeReference",
          "description": "reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ]
        },
        "id": {
          "default": "x",
          "examples": [
            "x",
            "y",
            "z"
          ],
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "space",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attometer",
                "angstrom",
                "centimeter",
                "decimeter",
                "exameter",
                "femtometer",
                "foot",
                "gigameter",
                "hectometer",
                "inch",
                "kilometer",
                "megameter",
                "meter",
                "micrometer",
                "mile",
                "millimeter",
                "nanometer",
                "parsec",
                "petameter",
                "picometer",
                "terameter",
                "yard",
                "yoctometer",
                "yottameter",
                "zeptometer",
                "zettameter"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "halo",
        "size",
        "type"
      ],
      "title": "model.v0_5.SpaceOutputAxisWithHalo",
      "type": "object"
    },
    "TimeInputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/ParameterizedSize"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
          "examples": [
            10,
            {
              "min": 32,
              "step": 16
            },
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        },
        "concatenable": {
          "default": false,
          "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
          "title": "Concatenable",
          "type": "boolean"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeInputAxis",
      "type": "object"
    },
    "TimeOutputAxis": {
      "additionalProperties": false,
      "properties": {
        "size": {
          "anyOf": [
            {
              "exclusiveMinimum": 0,
              "type": "integer"
            },
            {
              "$ref": "#/$defs/SizeReference"
            }
          ],
          "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ],
          "title": "Size"
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeOutputAxis",
      "type": "object"
    },
    "TimeOutputAxisWithHalo": {
      "additionalProperties": false,
      "properties": {
        "halo": {
          "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
          "minimum": 1,
          "title": "Halo",
          "type": "integer"
        },
        "size": {
          "$ref": "#/$defs/SizeReference",
          "description": "reference to another axis with an optional offset (see `SizeReference`)",
          "examples": [
            10,
            {
              "axis_id": "a",
              "offset": 5,
              "tensor_id": "t"
            }
          ]
        },
        "id": {
          "default": "time",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "description": {
          "default": "",
          "description": "A short description of this axis beyond its type and id.",
          "maxLength": 128,
          "title": "Description",
          "type": "string"
        },
        "type": {
          "const": "time",
          "title": "Type",
          "type": "string"
        },
        "unit": {
          "anyOf": [
            {
              "enum": [
                "attosecond",
                "centisecond",
                "day",
                "decisecond",
                "exasecond",
                "femtosecond",
                "gigasecond",
                "hectosecond",
                "hour",
                "kilosecond",
                "megasecond",
                "microsecond",
                "millisecond",
                "minute",
                "nanosecond",
                "petasecond",
                "picosecond",
                "second",
                "terasecond",
                "yoctosecond",
                "yottasecond",
                "zeptosecond",
                "zettasecond"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Unit"
        },
        "scale": {
          "default": 1.0,
          "exclusiveMinimum": 0,
          "title": "Scale",
          "type": "number"
        }
      },
      "required": [
        "halo",
        "size",
        "type"
      ],
      "title": "model.v0_5.TimeOutputAxisWithHalo",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "id": {
      "description": "Tensor id. No duplicates are allowed.",
      "maxLength": 32,
      "minLength": 1,
      "title": "TensorId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "free text description",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "axes": {
      "description": "tensor axes",
      "items": {
        "anyOf": [
          {
            "discriminator": {
              "mapping": {
                "batch": "#/$defs/BatchAxis",
                "channel": "#/$defs/ChannelAxis",
                "index": "#/$defs/IndexInputAxis",
                "space": "#/$defs/SpaceInputAxis",
                "time": "#/$defs/TimeInputAxis"
              },
              "propertyName": "type"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BatchAxis"
              },
              {
                "$ref": "#/$defs/ChannelAxis"
              },
              {
                "$ref": "#/$defs/IndexInputAxis"
              },
              {
                "$ref": "#/$defs/TimeInputAxis"
              },
              {
                "$ref": "#/$defs/SpaceInputAxis"
              }
            ]
          },
          {
            "discriminator": {
              "mapping": {
                "batch": "#/$defs/BatchAxis",
                "channel": "#/$defs/ChannelAxis",
                "index": "#/$defs/IndexOutputAxis",
                "space": {
                  "oneOf": [
                    {
                      "$ref": "#/$defs/SpaceOutputAxis"
                    },
                    {
                      "$ref": "#/$defs/SpaceOutputAxisWithHalo"
                    }
                  ]
                },
                "time": {
                  "oneOf": [
                    {
                      "$ref": "#/$defs/TimeOutputAxis"
                    },
                    {
                      "$ref": "#/$defs/TimeOutputAxisWithHalo"
                    }
                  ]
                }
              },
              "propertyName": "type"
            },
            "oneOf": [
              {
                "$ref": "#/$defs/BatchAxis"
              },
              {
                "$ref": "#/$defs/ChannelAxis"
              },
              {
                "$ref": "#/$defs/IndexOutputAxis"
              },
              {
                "oneOf": [
                  {
                    "$ref": "#/$defs/TimeOutputAxis"
                  },
                  {
                    "$ref": "#/$defs/TimeOutputAxisWithHalo"
                  }
                ]
              },
              {
                "oneOf": [
                  {
                    "$ref": "#/$defs/SpaceOutputAxis"
                  },
                  {
                    "$ref": "#/$defs/SpaceOutputAxisWithHalo"
                  }
                ]
              }
            ]
          }
        ]
      },
      "minItems": 1,
      "title": "Axes",
      "type": "array"
    },
    "test_tensor": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "An example tensor to use for testing.\nUsing the model with the test input tensors is expected to yield the test output tensors.\nEach test tensor has be a an ndarray in the\n[numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).\nThe file extension must be '.npy'."
    },
    "sample_tensor": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "A sample tensor to illustrate a possible input/output for the model,\nThe sample image primarily serves to inform a human user about an example use case\nand is typically stored as .hdf5, .png or .tiff.\nIt has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)\n(numpy's `.npy` format is not supported).\nThe image dimensionality has to match the number of axes specified in this tensor description."
    },
    "data": {
      "anyOf": [
        {
          "$ref": "#/$defs/NominalOrOrdinalDataDescr"
        },
        {
          "$ref": "#/$defs/IntervalOrRatioDataDescr"
        },
        {
          "items": {
            "anyOf": [
              {
                "$ref": "#/$defs/NominalOrOrdinalDataDescr"
              },
              {
                "$ref": "#/$defs/IntervalOrRatioDataDescr"
              }
            ]
          },
          "minItems": 1,
          "type": "array"
        }
      ],
      "default": {
        "type": "float32",
        "range": [
          null,
          null
        ],
        "unit": "arbitrary unit",
        "scale": 1.0,
        "offset": null
      },
      "description": "Description of the tensor's data values, optionally per channel.\nIf specified per channel, the data `type` needs to match across channels.",
      "title": "Data"
    }
  },
  "required": [
    "id",
    "axes"
  ],
  "title": "model.v0_5.TensorDescrBase",
  "type": "object"
}

Fields:

Validators:

  • _validate_axes — validates axes
  • _validate_sample_tensor
  • _check_data_type_across_channels — validates data
  • _check_data_matches_channelaxis

axes pydantic-field ¤

axes: NotEmpty[Sequence[IO_AxisT]]

tensor axes

data pydantic-field ¤

data: Union[
    TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]
]

Description of the tensor's data values, optionally per channel. If specified per channel, the data type needs to match across channels.

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

free text description

dtype property ¤

dtype: Literal[
    "float32",
    "float64",
    "uint8",
    "int8",
    "uint16",
    "int16",
    "uint32",
    "int32",
    "uint64",
    "int64",
    "bool",
]

dtype as specified under data.type or data[i].type

id pydantic-field ¤

Tensor id. No duplicates are allowed.

sample_tensor pydantic-field ¤

sample_tensor: FAIR[Optional[FileDescr_]] = None

A sample tensor to illustrate a possible input/output for the model. The sample image primarily serves to inform a human user about an example use case and is typically stored as .hdf5, .png or .tiff. It has to be readable by the imageio library (numpy's .npy format is not supported). The image dimensionality has to match the number of axes specified in this tensor description.

shape property ¤

shape

test_tensor pydantic-field ¤

test_tensor: FAIR[Optional[FileDescr_]] = None

An example tensor to use for testing. Using the model with the test input tensors is expected to yield the test output tensors. Each test tensor has to be an ndarray in the numpy.lib file format. The file extension must be '.npy'.

get_axis_sizes_for_array ¤

get_axis_sizes_for_array(
    array: NDArray[Any],
) -> Dict[AxisId, int]
Source code in src/bioimageio/spec/model/v0_5.py
1684
1685
1686
1687
1688
1689
1690
def get_axis_sizes_for_array(self, array: NDArray[Any]) -> Dict[AxisId, int]:
    if len(array.shape) != len(self.axes):
        raise ValueError(
            f"Dimension mismatch: array shape {array.shape} (#{len(array.shape)})"
            + f" incompatible with {len(self.axes)} axes."
        )
    return {a.id: array.shape[i] for i, a in enumerate(self.axes)}

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TensorId ¤

Bases: LowerCaseIdentifier


              flowchart TD
              bioimageio.spec.model.v0_5.TensorId[TensorId]
              bioimageio.spec._internal.types.LowerCaseIdentifier[LowerCaseIdentifier]
              bioimageio.spec._internal.validated_string.ValidatedString[ValidatedString]

                              bioimageio.spec._internal.types.LowerCaseIdentifier --> bioimageio.spec.model.v0_5.TensorId
                                bioimageio.spec._internal.validated_string.ValidatedString --> bioimageio.spec._internal.types.LowerCaseIdentifier
                



              click bioimageio.spec.model.v0_5.TensorId href "" "bioimageio.spec.model.v0_5.TensorId"
              click bioimageio.spec._internal.types.LowerCaseIdentifier href "" "bioimageio.spec._internal.types.LowerCaseIdentifier"
              click bioimageio.spec._internal.validated_string.ValidatedString href "" "bioimageio.spec._internal.validated_string.ValidatedString"
            

Methods:

Attributes:

root_model class-attribute ¤

root_model: Type[RootModel[Any]] = RootModel[
    Annotated[LowerCaseIdentifierAnno, MaxLen(32)]
]

__get_pydantic_core_schema__ classmethod ¤

__get_pydantic_core_schema__(
    source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema
Source code in src/bioimageio/spec/_internal/validated_string.py
29
30
31
32
33
@classmethod
def __get_pydantic_core_schema__(
    cls, source_type: Any, handler: GetCoreSchemaHandler
) -> CoreSchema:
    return no_info_after_validator_function(cls, handler(str))

__get_pydantic_json_schema__ classmethod ¤

__get_pydantic_json_schema__(
    core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue
Source code in src/bioimageio/spec/_internal/validated_string.py
35
36
37
38
39
40
41
42
43
44
@classmethod
def __get_pydantic_json_schema__(
    cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
    json_schema = cls.root_model.model_json_schema(mode=handler.mode)
    json_schema["title"] = cls.__name__.strip("_")
    if cls.__doc__:
        json_schema["description"] = cls.__doc__

    return json_schema

__new__ ¤

__new__(object: object)
Source code in src/bioimageio/spec/_internal/validated_string.py
19
20
21
22
23
def __new__(cls, object: object):
    _validated = cls.root_model.model_validate(object).root
    self = super().__new__(cls, _validated)
    self._validated = _validated
    return self._after_validator()

TensorflowJsWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    },
    "tensorflow_version": {
      "$ref": "#/$defs/Version",
      "description": "Version of the TensorFlow library used."
    }
  },
  "required": [
    "source",
    "tensorflow_version"
  ],
  "title": "model.v0_5.TensorflowJsWeightsDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

The multi-file weights. All required files/folders should be a zip archive.

tensorflow_version pydantic-field ¤

tensorflow_version: Version

Version of the TensorFlow library used.

type class-attribute ¤

type: WeightsFormat = 'tensorflow_js'

weights_format_name class-attribute ¤

weights_format_name: str = 'Tensorflow.js'

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

TensorflowSavedModelBundleWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nthe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model)\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    },
    "tensorflow_version": {
      "$ref": "#/$defs/Version",
      "description": "Version of the TensorFlow library used."
    },
    "dependencies": {
      "anyOf": [
        {
          "$ref": "#/$defs/FileDescr",
          "examples": [
            {
              "source": "environment.yaml"
            }
          ]
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Custom dependencies beyond tensorflow.\nShould include tensorflow and any version pinning has to be compatible with **tensorflow_version**."
    }
  },
  "required": [
    "source",
    "tensorflow_version"
  ],
  "title": "model.v0_5.TensorflowSavedModelBundleWeightsDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

dependencies pydantic-field ¤

dependencies: Optional[FileDescr_dependencies] = None

Custom dependencies beyond tensorflow. Should include tensorflow, and any version pinning has to be compatible with tensorflow_version.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model) need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

The multi-file weights. All required files/folders should be a zip archive.

tensorflow_version pydantic-field ¤

tensorflow_version: Version

Version of the TensorFlow library used.

type class-attribute ¤

type: WeightsFormat = 'tensorflow_saved_model_bundle'

weights_format_name class-attribute ¤

weights_format_name: str = 'Tensorflow Saved Model'

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

TimeAxisBase pydantic-model ¤

Bases: AxisBase

Show JSON schema:
{
  "additionalProperties": false,
  "properties": {
    "id": {
      "default": "time",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "time",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attosecond",
            "centisecond",
            "day",
            "decisecond",
            "exasecond",
            "femtosecond",
            "gigasecond",
            "hectosecond",
            "hour",
            "kilosecond",
            "megasecond",
            "microsecond",
            "millisecond",
            "minute",
            "nanosecond",
            "petasecond",
            "picosecond",
            "second",
            "terasecond",
            "yoctosecond",
            "yottasecond",
            "zeptosecond",
            "zettasecond"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    }
  },
  "required": [
    "type"
  ],
  "title": "model.v0_5.TimeAxisBase",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['time'] = 'time'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

type pydantic-field ¤

type: Literal['time'] = 'time'

unit pydantic-field ¤

unit: Optional[TimeUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TimeInputAxis pydantic-model ¤

Bases: TimeAxisBase, _WithInputAxisSize

Show JSON schema:
{
  "$defs": {
    "ParameterizedSize": {
      "additionalProperties": false,
      "description": "Describes a range of valid tensor axis sizes as `size = min + n*step`.\n\n- **min** and **step** are given by the model description.\n- All blocksize parameters n = 0,1,2,... yield a valid `size`.\n- A greater blocksize parameter n = 0,1,2,... results in a greater **size**.\n  This allows adjusting the axis size more generically.",
      "properties": {
        "min": {
          "exclusiveMinimum": 0,
          "title": "Min",
          "type": "integer"
        },
        "step": {
          "exclusiveMinimum": 0,
          "title": "Step",
          "type": "integer"
        }
      },
      "required": [
        "min",
        "step"
      ],
      "title": "model.v0_5.ParameterizedSize",
      "type": "object"
    },
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "size": {
      "anyOf": [
        {
          "exclusiveMinimum": 0,
          "type": "integer"
        },
        {
          "$ref": "#/$defs/ParameterizedSize"
        },
        {
          "$ref": "#/$defs/SizeReference"
        }
      ],
      "description": "The size/length of this axis can be specified as\n- fixed integer\n- parameterized series of valid sizes (`ParameterizedSize`)\n- reference to another axis with an optional offset (`SizeReference`)",
      "examples": [
        10,
        {
          "min": 32,
          "step": 16
        },
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ],
      "title": "Size"
    },
    "id": {
      "default": "time",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "time",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attosecond",
            "centisecond",
            "day",
            "decisecond",
            "exasecond",
            "femtosecond",
            "gigasecond",
            "hectosecond",
            "hour",
            "kilosecond",
            "megasecond",
            "microsecond",
            "millisecond",
            "minute",
            "nanosecond",
            "petasecond",
            "picosecond",
            "second",
            "terasecond",
            "yoctosecond",
            "yottasecond",
            "zeptosecond",
            "zettasecond"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    },
    "concatenable": {
      "default": false,
      "description": "If a model has a `concatenable` input axis, it can be processed blockwise,\nsplitting a longer sample axis into blocks matching its input tensor description.\nOutput axes are concatenable if they have a `SizeReference` to a concatenable\ninput axis.",
      "title": "Concatenable",
      "type": "boolean"
    }
  },
  "required": [
    "size",
    "type"
  ],
  "title": "model.v0_5.TimeInputAxis",
  "type": "object"
}

Fields:

concatenable pydantic-field ¤

concatenable: bool = False

If a model has a concatenable input axis, it can be processed blockwise, splitting a longer sample axis into blocks matching its input tensor description. Output axes are concatenable if they have a SizeReference to a concatenable input axis.

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['time'] = 'time'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

size pydantic-field ¤

size: Annotated[
    Union[
        Annotated[int, Gt(0)],
        ParameterizedSize,
        SizeReference,
    ],
    Field(
        examples=[
            10,
            ParameterizedSize(min=32, step=16).model_dump(
                mode="json"
            ),
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

The size/length of this axis can be specified as - fixed integer - parameterized series of valid sizes (ParameterizedSize) - reference to another axis with an optional offset (SizeReference)

type pydantic-field ¤

type: Literal['time'] = 'time'

unit pydantic-field ¤

unit: Optional[TimeUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TimeOutputAxis pydantic-model ¤

Bases: TimeAxisBase, _WithOutputAxisSize

Show JSON schema:
{
  "$defs": {
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "size": {
      "anyOf": [
        {
          "exclusiveMinimum": 0,
          "type": "integer"
        },
        {
          "$ref": "#/$defs/SizeReference"
        }
      ],
      "description": "The size/length of this axis can be specified as\n- fixed integer\n- reference to another axis with an optional offset (see `SizeReference`)",
      "examples": [
        10,
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ],
      "title": "Size"
    },
    "id": {
      "default": "time",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "time",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attosecond",
            "centisecond",
            "day",
            "decisecond",
            "exasecond",
            "femtosecond",
            "gigasecond",
            "hectosecond",
            "hour",
            "kilosecond",
            "megasecond",
            "microsecond",
            "millisecond",
            "minute",
            "nanosecond",
            "petasecond",
            "picosecond",
            "second",
            "terasecond",
            "yoctosecond",
            "yottasecond",
            "zeptosecond",
            "zettasecond"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    }
  },
  "required": [
    "size",
    "type"
  ],
  "title": "model.v0_5.TimeOutputAxis",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['time'] = 'time'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

size pydantic-field ¤

size: Annotated[
    Union[Annotated[int, Gt(0)], SizeReference],
    Field(
        examples=[
            10,
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

The size/length of this axis can be specified as - fixed integer - reference to another axis with an optional offset (see SizeReference)

type pydantic-field ¤

type: Literal['time'] = 'time'

unit pydantic-field ¤

unit: Optional[TimeUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TimeOutputAxisWithHalo pydantic-model ¤

Bases: TimeAxisBase, WithHalo

Show JSON schema:
{
  "$defs": {
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "halo": {
      "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
      "minimum": 1,
      "title": "Halo",
      "type": "integer"
    },
    "size": {
      "$ref": "#/$defs/SizeReference",
      "description": "reference to another axis with an optional offset (see `SizeReference`)",
      "examples": [
        10,
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ]
    },
    "id": {
      "default": "time",
      "maxLength": 16,
      "minLength": 1,
      "title": "AxisId",
      "type": "string"
    },
    "description": {
      "default": "",
      "description": "A short description of this axis beyond its type and id.",
      "maxLength": 128,
      "title": "Description",
      "type": "string"
    },
    "type": {
      "const": "time",
      "title": "Type",
      "type": "string"
    },
    "unit": {
      "anyOf": [
        {
          "enum": [
            "attosecond",
            "centisecond",
            "day",
            "decisecond",
            "exasecond",
            "femtosecond",
            "gigasecond",
            "hectosecond",
            "hour",
            "kilosecond",
            "megasecond",
            "microsecond",
            "millisecond",
            "minute",
            "nanosecond",
            "petasecond",
            "picosecond",
            "second",
            "terasecond",
            "yoctosecond",
            "yottasecond",
            "zeptosecond",
            "zettasecond"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "title": "Unit"
    },
    "scale": {
      "default": 1.0,
      "exclusiveMinimum": 0,
      "title": "Scale",
      "type": "number"
    }
  },
  "required": [
    "halo",
    "size",
    "type"
  ],
  "title": "model.v0_5.TimeOutputAxisWithHalo",
  "type": "object"
}

Fields:

description pydantic-field ¤

description: Annotated[str, MaxLen(128)] = ''

A short description of this axis beyond its type and id.

halo pydantic-field ¤

halo: Annotated[int, Ge(1)]

The halo should be cropped from the output tensor to avoid boundary effects. It is to be cropped from both sides, i.e. size_after_crop = size - 2 * halo. To document a halo that is already cropped by the model use size.offset instead.

id pydantic-field ¤

implemented_type class-attribute ¤

implemented_type: Literal['time'] = 'time'

scale pydantic-field ¤

scale: Annotated[float, Gt(0)] = 1.0

size pydantic-field ¤

size: Annotated[
    SizeReference,
    Field(
        examples=[
            10,
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

reference to another axis with an optional offset (see SizeReference)

type pydantic-field ¤

type: Literal['time'] = 'time'

unit pydantic-field ¤

unit: Optional[TimeUnit] = None

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

TorchscriptWeightsDescr pydantic-model ¤

Bases: WeightsEntryDescrBase

Show JSON schema:
{
  "$defs": {
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Source of the weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    },
    "pytorch_version": {
      "$ref": "#/$defs/Version",
      "description": "Version of the PyTorch library used."
    }
  },
  "required": [
    "source",
    "pytorch_version"
  ],
  "title": "model.v0_5.TorchscriptWeightsDescr",
  "type": "object"
}

Fields:

Validators:

  • _validate

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, The pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

pytorch_version pydantic-field ¤

pytorch_version: Version

Version of the PyTorch library used.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

Source of the weights file.

type class-attribute ¤

type: WeightsFormat = 'torchscript'

weights_format_name class-attribute ¤

weights_format_name: str = 'TorchScript'

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

WeightsDescr pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "ArchitectureFromFileDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Architecture source file",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "callable": {
          "description": "Identifier of the callable that returns a torch.nn.Module instance.",
          "examples": [
            "MyNetworkClass",
            "get_my_model"
          ],
          "minLength": 1,
          "title": "Identifier",
          "type": "string"
        },
        "kwargs": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "key word arguments for the `callable`",
          "title": "Kwargs",
          "type": "object"
        }
      },
      "required": [
        "source",
        "callable"
      ],
      "title": "model.v0_5.ArchitectureFromFileDescr",
      "type": "object"
    },
    "ArchitectureFromLibraryDescr": {
      "additionalProperties": false,
      "properties": {
        "callable": {
          "description": "Identifier of the callable that returns a torch.nn.Module instance.",
          "examples": [
            "MyNetworkClass",
            "get_my_model"
          ],
          "minLength": 1,
          "title": "Identifier",
          "type": "string"
        },
        "kwargs": {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "description": "key word arguments for the `callable`",
          "title": "Kwargs",
          "type": "object"
        },
        "import_from": {
          "description": "Where to import the callable from, i.e. `from <import_from> import <callable>`",
          "title": "Import From",
          "type": "string"
        }
      },
      "required": [
        "callable",
        "import_from"
      ],
      "title": "model.v0_5.ArchitectureFromLibraryDescr",
      "type": "object"
    },
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "FileDescr": {
      "additionalProperties": false,
      "description": "A file description",
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "File source",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        }
      },
      "required": [
        "source"
      ],
      "title": "_internal.io.FileDescr",
      "type": "object"
    },
    "KerasHdf5WeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "tensorflow_version": {
          "$ref": "#/$defs/Version",
          "description": "TensorFlow version used to create these weights."
        }
      },
      "required": [
        "source",
        "tensorflow_version"
      ],
      "title": "model.v0_5.KerasHdf5WeightsDescr",
      "type": "object"
    },
    "OnnxWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "opset_version": {
          "description": "ONNX opset version",
          "minimum": 7,
          "title": "Opset Version",
          "type": "integer"
        },
        "external_data": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr",
              "examples": [
                {
                  "source": "weights.onnx.data"
                }
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Source of the external ONNX data file holding the weights.\n(If present **source** holds the ONNX architecture without weights)."
        }
      },
      "required": [
        "source",
        "opset_version"
      ],
      "title": "model.v0_5.OnnxWeightsDescr",
      "type": "object"
    },
    "PytorchStateDictWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "architecture": {
          "anyOf": [
            {
              "$ref": "#/$defs/ArchitectureFromFileDescr"
            },
            {
              "$ref": "#/$defs/ArchitectureFromLibraryDescr"
            }
          ],
          "title": "Architecture"
        },
        "pytorch_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the PyTorch library used.\nIf `architecture.depencencies` is specified it has to include pytorch and any version pinning has to be compatible."
        },
        "dependencies": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr",
              "examples": [
                {
                  "source": "environment.yaml"
                }
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Custom depencies beyond pytorch described in a Conda environment file.\nAllows to specify custom dependencies, see conda docs:\n- [Exporting an environment file across platforms](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#exporting-an-environment-file-across-platforms)\n- [Creating an environment file manually](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-file-manually)\n\nThe conda environment file should include pytorch and any version pinning has to be compatible with\n**pytorch_version**."
        }
      },
      "required": [
        "source",
        "architecture",
        "pytorch_version"
      ],
      "title": "model.v0_5.PytorchStateDictWeightsDescr",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    },
    "TensorflowJsWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "tensorflow_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the TensorFlow library used."
        }
      },
      "required": [
        "source",
        "tensorflow_version"
      ],
      "title": "model.v0_5.TensorflowJsWeightsDescr",
      "type": "object"
    },
    "TensorflowSavedModelBundleWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "The multi-file weights.\nAll required files/folders should be a zip archive.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "tensorflow_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the TensorFlow library used."
        },
        "dependencies": {
          "anyOf": [
            {
              "$ref": "#/$defs/FileDescr",
              "examples": [
                {
                  "source": "environment.yaml"
                }
              ]
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Custom dependencies beyond tensorflow.\nShould include tensorflow and any version pinning has to be compatible with **tensorflow_version**."
        }
      },
      "required": [
        "source",
        "tensorflow_version"
      ],
      "title": "model.v0_5.TensorflowSavedModelBundleWeightsDescr",
      "type": "object"
    },
    "TorchscriptWeightsDescr": {
      "additionalProperties": false,
      "properties": {
        "source": {
          "anyOf": [
            {
              "description": "A URL with the HTTP or HTTPS scheme.",
              "format": "uri",
              "maxLength": 2083,
              "minLength": 1,
              "title": "HttpUrl",
              "type": "string"
            },
            {
              "$ref": "#/$defs/RelativeFilePath"
            },
            {
              "format": "file-path",
              "title": "FilePath",
              "type": "string"
            }
          ],
          "description": "Source of the weights file.",
          "title": "Source"
        },
        "sha256": {
          "anyOf": [
            {
              "description": "A SHA-256 hash value",
              "maxLength": 64,
              "minLength": 64,
              "title": "Sha256",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "SHA256 hash value of the **source** file.",
          "title": "Sha256"
        },
        "authors": {
          "anyOf": [
            {
              "items": {
                "$ref": "#/$defs/Author"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
          "title": "Authors"
        },
        "parent": {
          "anyOf": [
            {
              "enum": [
                "keras_hdf5",
                "onnx",
                "pytorch_state_dict",
                "tensorflow_js",
                "tensorflow_saved_model_bundle",
                "torchscript"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
          "examples": [
            "pytorch_state_dict"
          ],
          "title": "Parent"
        },
        "comment": {
          "default": "",
          "description": "A comment about this weights entry, for example how these weights were created.",
          "title": "Comment",
          "type": "string"
        },
        "pytorch_version": {
          "$ref": "#/$defs/Version",
          "description": "Version of the PyTorch library used."
        }
      },
      "required": [
        "source",
        "pytorch_version"
      ],
      "title": "model.v0_5.TorchscriptWeightsDescr",
      "type": "object"
    },
    "Version": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        }
      ],
      "description": "wraps a packaging.version.Version instance for validation in pydantic models",
      "title": "Version"
    },
    "YamlValue": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "format": "date",
          "type": "string"
        },
        {
          "format": "date-time",
          "type": "string"
        },
        {
          "type": "integer"
        },
        {
          "type": "number"
        },
        {
          "type": "string"
        },
        {
          "items": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "array"
        },
        {
          "additionalProperties": {
            "$ref": "#/$defs/YamlValue"
          },
          "type": "object"
        },
        {
          "type": "null"
        }
      ]
    }
  },
  "additionalProperties": false,
  "properties": {
    "keras_hdf5": {
      "anyOf": [
        {
          "$ref": "#/$defs/KerasHdf5WeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "onnx": {
      "anyOf": [
        {
          "$ref": "#/$defs/OnnxWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "pytorch_state_dict": {
      "anyOf": [
        {
          "$ref": "#/$defs/PytorchStateDictWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "tensorflow_js": {
      "anyOf": [
        {
          "$ref": "#/$defs/TensorflowJsWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "tensorflow_saved_model_bundle": {
      "anyOf": [
        {
          "$ref": "#/$defs/TensorflowSavedModelBundleWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    },
    "torchscript": {
      "anyOf": [
        {
          "$ref": "#/$defs/TorchscriptWeightsDescr"
        },
        {
          "type": "null"
        }
      ],
      "default": null
    }
  },
  "title": "model.v0_5.WeightsDescr",
  "type": "object"
}

Fields:

Validators:

available_formats property ¤

available_formats

keras_hdf5 pydantic-field ¤

keras_hdf5: Optional[KerasHdf5WeightsDescr] = None

missing_formats property ¤

missing_formats

onnx pydantic-field ¤

onnx: Optional[OnnxWeightsDescr] = None

pytorch_state_dict pydantic-field ¤

pytorch_state_dict: Optional[
    PytorchStateDictWeightsDescr
] = None

tensorflow_js pydantic-field ¤

tensorflow_js: Optional[TensorflowJsWeightsDescr] = None

tensorflow_saved_model_bundle pydantic-field ¤

tensorflow_saved_model_bundle: Optional[
    TensorflowSavedModelBundleWeightsDescr
] = None

torchscript pydantic-field ¤

torchscript: Optional[TorchscriptWeightsDescr] = None

__getitem__ ¤

__getitem__(
    key: Literal[
        "keras_hdf5",
        "onnx",
        "pytorch_state_dict",
        "tensorflow_js",
        "tensorflow_saved_model_bundle",
        "torchscript",
    ],
)
Source code in src/bioimageio/spec/model/v0_5.py
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
def __getitem__(
    self,
    key: Literal[
        "keras_hdf5",
        "onnx",
        "pytorch_state_dict",
        "tensorflow_js",
        "tensorflow_saved_model_bundle",
        "torchscript",
    ],
):
    if key == "keras_hdf5":
        ret = self.keras_hdf5
    elif key == "onnx":
        ret = self.onnx
    elif key == "pytorch_state_dict":
        ret = self.pytorch_state_dict
    elif key == "tensorflow_js":
        ret = self.tensorflow_js
    elif key == "tensorflow_saved_model_bundle":
        ret = self.tensorflow_saved_model_bundle
    elif key == "torchscript":
        ret = self.torchscript
    else:
        raise KeyError(key)

    if ret is None:
        raise KeyError(key)

    return ret

check_entries pydantic-validator ¤

check_entries() -> Self
Source code in src/bioimageio/spec/model/v0_5.py
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
@model_validator(mode="after")
def check_entries(self) -> Self:
    entries = {wtype for wtype, entry in self if entry is not None}

    if not entries:
        raise ValueError("Missing weights entry")

    entries_wo_parent = {
        wtype
        for wtype, entry in self
        if entry is not None and hasattr(entry, "parent") and entry.parent is None
    }
    if len(entries_wo_parent) != 1:
        issue_warning(
            "Exactly one weights entry may not specify the `parent` field (got"
            + " {value}). That entry is considered the original set of model weights."
            + " Other weight formats are created through conversion of the orignal or"
            + " already converted weights. They have to reference the weights format"
            + " they were converted from as their `parent`.",
            value=len(entries_wo_parent),
            field="weights",
        )

    for wtype, entry in self:
        if entry is None:
            continue

        assert hasattr(entry, "type")
        assert hasattr(entry, "parent")
        assert wtype == entry.type
        if (
            entry.parent is not None and entry.parent not in entries
        ):  # self reference checked for `parent` field
            raise ValueError(
                f"`weights.{wtype}.parent={entry.parent} not in specified weight"
                + f" formats: {entries}"
            )

    return self

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

WeightsEntryDescrBase pydantic-model ¤

Bases: FileDescr

Show JSON schema:
{
  "$defs": {
    "Author": {
      "additionalProperties": false,
      "properties": {
        "affiliation": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Affiliation",
          "title": "Affiliation"
        },
        "email": {
          "anyOf": [
            {
              "format": "email",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Email",
          "title": "Email"
        },
        "orcid": {
          "anyOf": [
            {
              "description": "An ORCID identifier, see https://orcid.org/",
              "title": "OrcidId",
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID\n) in hyphenated groups of 4 digits, (and [valid](\nhttps://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier\n) as per ISO 7064 11,2.)",
          "examples": [
            "0000-0001-2345-6789"
          ],
          "title": "Orcid"
        },
        "name": {
          "title": "Name",
          "type": "string"
        },
        "github_user": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "title": "Github User"
        }
      },
      "required": [
        "name"
      ],
      "title": "generic.v0_3.Author",
      "type": "object"
    },
    "RelativeFilePath": {
      "description": "A path relative to the `rdf.yaml` file (also if the RDF source is a URL).",
      "format": "path",
      "title": "RelativeFilePath",
      "type": "string"
    }
  },
  "additionalProperties": false,
  "properties": {
    "source": {
      "anyOf": [
        {
          "description": "A URL with the HTTP or HTTPS scheme.",
          "format": "uri",
          "maxLength": 2083,
          "minLength": 1,
          "title": "HttpUrl",
          "type": "string"
        },
        {
          "$ref": "#/$defs/RelativeFilePath"
        },
        {
          "format": "file-path",
          "title": "FilePath",
          "type": "string"
        }
      ],
      "description": "Source of the weights file.",
      "title": "Source"
    },
    "sha256": {
      "anyOf": [
        {
          "description": "A SHA-256 hash value",
          "maxLength": 64,
          "minLength": 64,
          "title": "Sha256",
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "SHA256 hash value of the **source** file.",
      "title": "Sha256"
    },
    "authors": {
      "anyOf": [
        {
          "items": {
            "$ref": "#/$defs/Author"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Authors\nEither the person(s) that have trained this model resulting in the original weights file.\n    (If this is the initial weights entry, i.e. it does not have a `parent`)\nOr the person(s) who have converted the weights to this weights format.\n    (If this is a child weight, i.e. it has a `parent` field)",
      "title": "Authors"
    },
    "parent": {
      "anyOf": [
        {
          "enum": [
            "keras_hdf5",
            "onnx",
            "pytorch_state_dict",
            "tensorflow_js",
            "tensorflow_saved_model_bundle",
            "torchscript"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The source weights these weights were converted from.\nFor example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,\nThe `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.\nAll weight entries except one (the initial set of weights resulting from training the model),\nneed to have this field.",
      "examples": [
        "pytorch_state_dict"
      ],
      "title": "Parent"
    },
    "comment": {
      "default": "",
      "description": "A comment about this weights entry, for example how these weights were created.",
      "title": "Comment",
      "type": "string"
    }
  },
  "required": [
    "source"
  ],
  "title": "model.v0_5.WeightsEntryDescrBase",
  "type": "object"
}

Fields:

Validators:

  • _validate

authors pydantic-field ¤

authors: Optional[List[Author]] = None

Authors Either the person(s) that have trained this model resulting in the original weights file. (If this is the initial weights entry, i.e. it does not have a parent) Or the person(s) who have converted the weights to this weights format. (If this is a child weight, i.e. it has a parent field)

comment pydantic-field ¤

comment: str = ''

A comment about this weights entry, for example how these weights were created.

parent pydantic-field ¤

parent: Annotated[
    Optional[WeightsFormat],
    Field(examples=["pytorch_state_dict"]),
] = None

The source weights these weights were converted from. For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, The pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. All weight entries except one (the initial set of weights resulting from training the model), need to have this field.

sha256 pydantic-field ¤

sha256: Optional[Sha256] = None

SHA256 hash value of the source file.

source pydantic-field ¤

source: Annotated[
    FileSource, AfterValidator(wo_special_file_name)
]

Source of the weights file.

type class-attribute ¤

weights_format_name class-attribute ¤

weights_format_name: str

download ¤

download(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

alias for .get_reader

Source code in src/bioimageio/spec/_internal/io.py
306
307
308
309
310
311
312
def download(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """alias for `.get_reader`"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

get_reader ¤

get_reader(
    *,
    progressbar: Union[
        Progressbar, Callable[[], Progressbar], bool, None
    ] = None,
)

open the file source (download if needed)

Source code in src/bioimageio/spec/_internal/io.py
298
299
300
301
302
303
304
def get_reader(
    self,
    *,
    progressbar: Union[Progressbar, Callable[[], Progressbar], bool, None] = None,
):
    """open the file source (download if needed)"""
    return get_reader(self.source, progressbar=progressbar, sha256=self.sha256)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

validate_sha256 ¤

validate_sha256(force_recompute: bool = False) -> None

validate the sha256 hash value of the source file

Source code in src/bioimageio/spec/_internal/io.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def validate_sha256(self, force_recompute: bool = False) -> None:
    """validate the sha256 hash value of the **source** file"""
    context = get_validation_context()
    src_str = str(self.source)
    if not force_recompute and src_str in context.known_files:
        actual_sha = context.known_files[src_str]
    else:
        reader = get_reader(self.source, sha256=self.sha256)
        if force_recompute:
            actual_sha = get_sha256(reader)
        else:
            actual_sha = reader.sha256

        context.known_files[src_str] = actual_sha

    if actual_sha is None:
        return
    elif self.sha256 == actual_sha:
        pass
    elif self.sha256 is None or context.update_hashes:
        self.sha256 = actual_sha
    elif self.sha256 != actual_sha:
        raise ValueError(
            f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
            + f"{actual_sha}. Update expected `sha256` or point to the matching "
            + "file."
        )

WithHalo pydantic-model ¤

Bases: Node

Show JSON schema:
{
  "$defs": {
    "SizeReference": {
      "additionalProperties": false,
      "description": "A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.\n\n`axis.size = reference.size * reference.scale / axis.scale + offset`\n\nNote:\n1. The axis and the referenced axis need to have the same unit (or no unit).\n2. Batch axes may not be referenced.\n3. Fractions are rounded down.\n4. If the reference axis is `concatenable` the referencing axis is assumed to be\n    `concatenable` as well with the same block order.\n\nExample:\nAn unisotropic input image of w*h=100*49 pixels depicts a phsical space of 200*196mm\u00b2.\nLet's assume that we want to express the image height h in relation to its width w\ninstead of only accepting input images of exactly 100*49 pixels\n(for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).\n\n>>> w = SpaceInputAxis(id=AxisId(\"w\"), size=100, unit=\"millimeter\", scale=2)\n>>> h = SpaceInputAxis(\n...     id=AxisId(\"h\"),\n...     size=SizeReference(tensor_id=TensorId(\"input\"), axis_id=AxisId(\"w\"), offset=-1),\n...     unit=\"millimeter\",\n...     scale=4,\n... )\n>>> print(h.size.get_size(h, w))\n49\n\n\u21d2 h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49",
      "properties": {
        "tensor_id": {
          "description": "tensor id of the reference axis",
          "maxLength": 32,
          "minLength": 1,
          "title": "TensorId",
          "type": "string"
        },
        "axis_id": {
          "description": "axis id of the reference axis",
          "maxLength": 16,
          "minLength": 1,
          "title": "AxisId",
          "type": "string"
        },
        "offset": {
          "default": 0,
          "title": "Offset",
          "type": "integer"
        }
      },
      "required": [
        "tensor_id",
        "axis_id"
      ],
      "title": "model.v0_5.SizeReference",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "properties": {
    "halo": {
      "description": "The halo should be cropped from the output tensor to avoid boundary effects.\nIt is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.\nTo document a halo that is already cropped by the model use `size.offset` instead.",
      "minimum": 1,
      "title": "Halo",
      "type": "integer"
    },
    "size": {
      "$ref": "#/$defs/SizeReference",
      "description": "reference to another axis with an optional offset (see `SizeReference`)",
      "examples": [
        10,
        {
          "axis_id": "a",
          "offset": 5,
          "tensor_id": "t"
        }
      ]
    }
  },
  "required": [
    "halo",
    "size"
  ],
  "title": "model.v0_5.WithHalo",
  "type": "object"
}

Fields:

halo pydantic-field ¤

halo: Annotated[int, Ge(1)]

The halo should be cropped from the output tensor to avoid boundary effects. It is to be cropped from both sides, i.e. size_after_crop = size - 2 * halo. To document a halo that is already cropped by the model use size.offset instead.

size pydantic-field ¤

size: Annotated[
    SizeReference,
    Field(
        examples=[
            10,
            SizeReference(
                tensor_id=TensorId("t"),
                axis_id=AxisId("a"),
                offset=5,
            ).model_dump(mode="json"),
        ]
    ),
]

reference to another axis with an optional offset (see SizeReference)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ZeroMeanUnitVarianceDescr pydantic-model ¤

Bases: ProcessingDescrBase

Subtract mean and divide by variance.

Examples:

Subtract tensor mean and variance - in YAML

preprocessing:
  - id: zero_mean_unit_variance
- in Python

>>> preprocessing = [ZeroMeanUnitVarianceDescr()]
Show JSON schema:
{
  "$defs": {
    "ZeroMeanUnitVarianceKwargs": {
      "additionalProperties": false,
      "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
      "properties": {
        "axes": {
          "anyOf": [
            {
              "items": {
                "maxLength": 16,
                "minLength": 1,
                "title": "AxisId",
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize each sample independently leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
          "examples": [
            [
              "batch",
              "x",
              "y"
            ]
          ],
          "title": "Axes"
        },
        "eps": {
          "default": 1e-06,
          "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
          "exclusiveMinimum": 0,
          "maximum": 0.1,
          "title": "Eps",
          "type": "number"
        }
      },
      "title": "model.v0_5.ZeroMeanUnitVarianceKwargs",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Subtract mean and divide by variance.\n\nExamples:\n    Subtract tensor mean and variance\n    - in YAML\n    ```yaml\n    preprocessing:\n      - id: zero_mean_unit_variance\n    ```\n    - in Python\n    >>> preprocessing = [ZeroMeanUnitVarianceDescr()]",
  "properties": {
    "id": {
      "const": "zero_mean_unit_variance",
      "title": "Id",
      "type": "string"
    },
    "kwargs": {
      "$ref": "#/$defs/ZeroMeanUnitVarianceKwargs"
    }
  },
  "required": [
    "id"
  ],
  "title": "model.v0_5.ZeroMeanUnitVarianceDescr",
  "type": "object"
}

Fields:

id pydantic-field ¤

id: Literal["zero_mean_unit_variance"] = (
    "zero_mean_unit_variance"
)

implemented_id class-attribute ¤

implemented_id: Literal["zero_mean_unit_variance"] = (
    "zero_mean_unit_variance"
)

kwargs pydantic-field ¤

__pydantic_init_subclass__ classmethod ¤

__pydantic_init_subclass__(**kwargs: Any) -> None
Source code in src/bioimageio/spec/_internal/common_nodes.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
    explict_fields: Dict[str, Any] = {}
    for attr in dir(cls):
        if attr.startswith("implemented_"):
            field_name = attr.replace("implemented_", "")
            if field_name not in cls.model_fields:
                continue

            assert (
                cls.model_fields[field_name].get_default() is PydanticUndefined
            ), field_name
            default = getattr(cls, attr)
            explict_fields[field_name] = default

    cls._fields_to_set_explicitly = MappingProxyType(explict_fields)
    return super().__pydantic_init_subclass__(**kwargs)

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

ZeroMeanUnitVarianceKwargs pydantic-model ¤

Bases: ProcessingKwargs

key word arguments for ZeroMeanUnitVarianceDescr

Show JSON schema:
{
  "additionalProperties": false,
  "description": "key word arguments for `ZeroMeanUnitVarianceDescr`",
  "properties": {
    "axes": {
      "anyOf": [
        {
          "items": {
            "maxLength": 16,
            "minLength": 1,
            "title": "AxisId",
            "type": "string"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.\nFor example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')\nresulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.\nTo normalize each sample independently leave out the 'batch' axis.\nDefault: Scale all axes jointly.",
      "examples": [
        [
          "batch",
          "x",
          "y"
        ]
      ],
      "title": "Axes"
    },
    "eps": {
      "default": 1e-06,
      "description": "epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`.",
      "exclusiveMinimum": 0,
      "maximum": 0.1,
      "title": "Eps",
      "type": "number"
    }
  },
  "title": "model.v0_5.ZeroMeanUnitVarianceKwargs",
  "type": "object"
}

Fields:

  • axes (Annotated[Optional[Sequence[AxisId]], Field(examples=[('batch', 'x', 'y')])])
  • eps (Annotated[float, Interval(gt=0, le=0.1)])

axes pydantic-field ¤

axes: Annotated[
    Optional[Sequence[AxisId]],
    Field(examples=[("batch", "x", "y")]),
] = None

The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std. For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x') resulting in a tensor of equal shape normalized per channel, specify axes=('batch', 'x', 'y'). To normalize each sample independently leave out the 'batch' axis. Default: Scale all axes jointly.

eps pydantic-field ¤

eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-06

epsilon for numeric stability: out = (tensor - mean) / (std + eps).

__contains__ ¤

__contains__(item: str) -> bool
Source code in src/bioimageio/spec/_internal/common_nodes.py
425
426
def __contains__(self, item: str) -> bool:
    return item in self.__class__.model_fields

__getitem__ ¤

__getitem__(item: str) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
419
420
421
422
423
def __getitem__(self, item: str) -> Any:
    if item in self.__class__.model_fields:
        return getattr(self, item)
    else:
        raise KeyError(item)

get ¤

get(item: str, default: Any = None) -> Any
Source code in src/bioimageio/spec/_internal/common_nodes.py
416
417
def get(self, item: str, default: Any = None) -> Any:
    return self[item] if item in self else default

model_validate classmethod ¤

model_validate(
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[
        ValidationContext, Mapping[str, Any], None
    ] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self

Validate a pydantic model instance.

Parameters:

  • obj ¤
    (Union[Any, Mapping[str, Any]]) –

    The object to validate.

  • strict ¤
    (Optional[bool], default: None ) –

    Whether to raise an exception on invalid fields.

  • from_attributes ¤
    (Optional[bool], default: None ) –

    Whether to extract data from object attributes.

  • context ¤
    (Union[ValidationContext, Mapping[str, Any], None], default: None ) –

    Additional context to pass to the validator.

Raises:

  • ValidationError

    If the object failed validation.

Returns:

  • Self

    The validated description instance.

Source code in src/bioimageio/spec/_internal/node.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
@classmethod
def model_validate(
    cls,
    obj: Union[Any, Mapping[str, Any]],
    *,
    strict: Optional[bool] = None,
    from_attributes: Optional[bool] = None,
    context: Union[ValidationContext, Mapping[str, Any], None] = None,
    by_alias: bool | None = None,
    by_name: bool | None = None,
) -> Self:
    """Validate a pydantic model instance.

    Args:
        obj: The object to validate.
        strict: Whether to raise an exception on invalid fields.
        from_attributes: Whether to extract data from object attributes.
        context: Additional context to pass to the validator.

    Raises:
        ValidationError: If the object failed validation.

    Returns:
        The validated description instance.
    """
    __tracebackhide__ = True

    if context is None:
        context = get_validation_context()
    elif isinstance(context, collections.abc.Mapping):
        context = ValidationContext(**context)

    assert not isinstance(obj, collections.abc.Mapping) or is_kwargs(obj), obj

    with context:
        # use validation context as context manager for equal behavior of __init__ and model_validate
        return super().model_validate(
            obj, strict=strict, from_attributes=from_attributes
        )

convert_axes ¤

convert_axes(
    axes: str,
    *,
    shape: Union[
        Sequence[int],
        _ParameterizedInputShape_v0_4,
        _ImplicitOutputShape_v0_4,
    ],
    tensor_type: Literal["input", "output"],
    halo: Optional[Sequence[int]],
    size_refs: Mapping[_TensorName_v0_4, Mapping[str, int]],
)
Source code in src/bioimageio/spec/model/v0_5.py
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
def convert_axes(
    axes: str,
    *,
    shape: Union[
        Sequence[int], _ParameterizedInputShape_v0_4, _ImplicitOutputShape_v0_4
    ],
    tensor_type: Literal["input", "output"],
    halo: Optional[Sequence[int]],
    size_refs: Mapping[_TensorName_v0_4, Mapping[str, int]],
):
    ret: List[AnyAxis] = []
    for i, a in enumerate(axes):
        axis_type = _AXIS_TYPE_MAP.get(a, a)
        if axis_type == "batch":
            ret.append(BatchAxis())
            continue

        scale = 1.0
        if isinstance(shape, _ParameterizedInputShape_v0_4):
            if shape.step[i] == 0:
                size = shape.min[i]
            else:
                size = ParameterizedSize(min=shape.min[i], step=shape.step[i])
        elif isinstance(shape, _ImplicitOutputShape_v0_4):
            ref_t = str(shape.reference_tensor)
            if ref_t.count(".") == 1:
                t_id, orig_a_id = ref_t.split(".")
            else:
                t_id = ref_t
                orig_a_id = a

            a_id = _AXIS_ID_MAP.get(orig_a_id, a)
            if not (orig_scale := shape.scale[i]):
                # old way to insert a new axis dimension
                size = int(2 * shape.offset[i])
            else:
                scale = 1 / orig_scale
                if axis_type in ("channel", "index"):
                    # these axes no longer have a scale
                    offset_from_scale = orig_scale * size_refs.get(
                        _TensorName_v0_4(t_id), {}
                    ).get(orig_a_id, 0)
                else:
                    offset_from_scale = 0
                size = SizeReference(
                    tensor_id=TensorId(t_id),
                    axis_id=AxisId(a_id),
                    offset=int(offset_from_scale + 2 * shape.offset[i]),
                )
        else:
            size = shape[i]

        if axis_type == "time":
            if tensor_type == "input":
                ret.append(TimeInputAxis(size=size, scale=scale))
            else:
                assert not isinstance(size, ParameterizedSize)
                if halo is None:
                    ret.append(TimeOutputAxis(size=size, scale=scale))
                else:
                    assert not isinstance(size, int)
                    ret.append(
                        TimeOutputAxisWithHalo(size=size, scale=scale, halo=halo[i])
                    )

        elif axis_type == "index":
            if tensor_type == "input":
                ret.append(IndexInputAxis(size=size))
            else:
                if isinstance(size, ParameterizedSize):
                    size = DataDependentSize(min=size.min)

                ret.append(IndexOutputAxis(size=size))
        elif axis_type == "channel":
            assert not isinstance(size, ParameterizedSize)
            if isinstance(size, SizeReference):
                warnings.warn(
                    "Conversion of channel size from an implicit output shape may be"
                    + " wrong"
                )
                ret.append(
                    ChannelAxis(
                        channel_names=[
                            Identifier(f"channel{i}") for i in range(size.offset)
                        ]
                    )
                )
            else:
                ret.append(
                    ChannelAxis(
                        channel_names=[Identifier(f"channel{i}") for i in range(size)]
                    )
                )
        elif axis_type == "space":
            if tensor_type == "input":
                ret.append(SpaceInputAxis(id=AxisId(a), size=size, scale=scale))
            else:
                assert not isinstance(size, ParameterizedSize)
                if halo is None or halo[i] == 0:
                    ret.append(SpaceOutputAxis(id=AxisId(a), size=size, scale=scale))
                elif isinstance(size, int):
                    raise NotImplementedError(
                        f"output axis with halo and fixed size (here {size}) not allowed"
                    )
                else:
                    ret.append(
                        SpaceOutputAxisWithHalo(
                            id=AxisId(a), size=size, scale=scale, halo=halo[i]
                        )
                    )

    return ret

generate_covers ¤

generate_covers(
    inputs: Sequence[Tuple[InputTensorDescr, NDArray[Any]]],
    outputs: Sequence[
        Tuple[OutputTensorDescr, NDArray[Any]]
    ],
) -> List[Path]
Source code in src/bioimageio/spec/model/v0_5.py
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
def generate_covers(
    inputs: Sequence[Tuple[InputTensorDescr, NDArray[Any]]],
    outputs: Sequence[Tuple[OutputTensorDescr, NDArray[Any]]],
) -> List[Path]:
    """Generate cover image(s) for a model from its first input/output tensor pair.

    Both tensors are reduced to 2D RGB images. If the two images end up with
    the same shape, a single, diagonally split "cover.png" is written;
    otherwise separate "input.png" and "output.png" files are written. All
    files go into a newly created temporary directory.

    Returns:
        Paths of the written PNG file(s).

    Raises:
        ValueError: if `inputs`/`outputs` is empty or a tensor cannot be
            reduced to a 2D (+ channel) image.
    """

    def squeeze(
        data: NDArray[Any], axes: Sequence[AnyAxis]
    ) -> Tuple[NDArray[Any], List[AnyAxis]]:
        """apply numpy.ndarray.squeeze while keeping track of the axis descriptions remaining"""
        if data.ndim != len(axes):
            raise ValueError(
                f"tensor shape {data.shape} does not match described axes"
                + f" {[a.id for a in axes]}"
            )

        # keep (copies of) only the axis descriptions of non-singleton dimensions
        axes = [deepcopy(a) for a, s in zip(axes, data.shape) if s != 1]
        return data.squeeze(), axes

    def normalize(
        data: NDArray[Any], axis: Optional[Tuple[int, ...]], eps: float = 1e-7
    ) -> NDArray[np.float32]:
        """min-max normalize `data` to [0, 1) along `axis`; `eps` avoids division by zero"""
        data = data.astype("float32")
        data -= data.min(axis=axis, keepdims=True)
        data /= data.max(axis=axis, keepdims=True) + eps
        return data

    def to_2d_image(data: NDArray[Any], axes: Sequence[AnyAxis]) -> NDArray[np.uint8]:
        """reduce `data` to an (h, w, 3) uint8 RGB image by slicing/squeezing its axes"""
        original_shape = data.shape
        original_axes = list(axes)
        data, axes = squeeze(data, axes)

        # take slice from any batch or index axis if needed
        # and convert the first channel axis and take a slice from any additional channel axes
        slices: Tuple[slice, ...] = ()
        ndim = data.ndim
        # target dimensionality: 3 with a channel axis (h, w, c), else 2 (h, w)
        ndim_need = 3 if any(isinstance(a, ChannelAxis) for a in axes) else 2
        has_c_axis = False
        for i, a in enumerate(axes):
            s = data.shape[i]
            assert s > 1  # singleton dimensions were squeezed away above
            if (
                isinstance(a, (BatchAxis, IndexInputAxis, IndexOutputAxis))
                and ndim > ndim_need
            ):
                # keep a single, central element of batch/index axes
                data = data[slices + (slice(s // 2 - 1, s // 2),)]
                ndim -= 1
            elif isinstance(a, ChannelAxis):
                if has_c_axis:
                    # second channel axis: keep only its first channel
                    data = data[slices + (slice(0, 1),)]
                    ndim -= 1
                else:
                    has_c_axis = True
                    if s == 2:
                        # visualize two channels with cyan and magenta
                        # (R=ch1, G=ch0, B=mean of both)
                        data = np.concatenate(
                            [
                                data[slices + (slice(1, 2),)],
                                data[slices + (slice(0, 1),)],
                                (
                                    data[slices + (slice(0, 1),)]
                                    + data[slices + (slice(1, 2),)]
                                )
                                / 2,  # TODO: take maximum instead?
                            ],
                            axis=i,
                        )
                    elif data.shape[i] == 3:
                        pass  # visualize 3 channels as RGB
                    else:
                        # visualize first 3 channels as RGB
                        data = data[slices + (slice(3),)]

                    assert data.shape[i] == 3

            slices += (slice(None),)

        data, axes = squeeze(data, axes)
        assert len(axes) == ndim
        # take slice from z axis if needed
        slices = ()
        if ndim > ndim_need:
            for i, a in enumerate(axes):
                s = data.shape[i]
                if a.id == AxisId("z"):
                    # keep the central z-slice
                    data = data[slices + (slice(s // 2 - 1, s // 2),)]
                    data, axes = squeeze(data, axes)
                    ndim -= 1
                    break

            slices += (slice(None),)

        # take slice from any space or time axis
        slices = ()

        for i, a in enumerate(axes):
            if ndim <= ndim_need:
                break

            s = data.shape[i]
            assert s > 1
            if isinstance(
                a, (SpaceInputAxis, SpaceOutputAxis, TimeInputAxis, TimeOutputAxis)
            ):
                # keep a central slice to further reduce dimensionality
                data = data[slices + (slice(s // 2 - 1, s // 2),)]
                ndim -= 1

            slices += (slice(None),)

        del slices
        data, axes = squeeze(data, axes)
        assert len(axes) == ndim

        if (has_c_axis and ndim != 3) or (not has_c_axis and ndim != 2):
            raise ValueError(
                f"Failed to construct cover image from shape {original_shape} with axes {[a.id for a in original_axes]}."
            )

        if not has_c_axis:
            # grayscale -> RGB by repeating the single channel three times
            assert ndim == 2
            data = np.repeat(data[:, :, None], 3, axis=2)
            axes.append(ChannelAxis(channel_names=list(map(Identifier, "RGB"))))
            ndim += 1

        assert ndim == 3

        # transpose axis order such that longest axis comes first...
        axis_order: List[int] = list(np.argsort(list(data.shape)))
        axis_order.reverse()
        # ... and channel axis is last
        c = [i for i in range(3) if isinstance(axes[i], ChannelAxis)][0]
        axis_order.append(axis_order.pop(c))
        axes = [axes[ao] for ao in axis_order]
        data = data.transpose(axis_order)

        # h, w = data.shape[:2]
        # if h / w  in (1.0 or 2.0):
        #     pass
        # elif h / w < 2:
        # TODO: enforce 2:1 or 1:1 aspect ratio for generated cover images

        # normalize jointly over all space/time axes (channels stay separate)
        norm_along = (
            tuple(i for i, a in enumerate(axes) if a.type in ("space", "time")) or None
        )
        # normalize the data and map to 8 bit
        data = normalize(data, norm_along)
        data = (data * 255).astype("uint8")

        return data

    def create_diagonal_split_image(
        im0: NDArray[Any], im1: NDArray[Any]
    ) -> NDArray[np.uint8]:
        """combine two equally shaped uint8 RGB images: `im0` fills the lower
        triangle, `im1` the upper triangle"""
        assert im0.dtype == im1.dtype == np.uint8
        assert im0.shape == im1.shape
        assert im0.ndim == 3
        N, M, C = im0.shape
        assert C == 3
        out = np.ones((N, M, C), dtype="uint8")
        for c in range(C):
            # NOTE(review): zero-valued pixels inside im0's lower triangle are
            # indistinguishable from the masked-out region and get filled from
            # im1 — acceptable for cover images, but not a clean geometric split
            outc = np.tril(im0[..., c])
            mask = outc == 0
            outc[mask] = np.triu(im1[..., c])[mask]
            out[..., c] = outc

        return out

    if not inputs:
        raise ValueError("Missing test input tensor for cover generation.")

    if not outputs:
        raise ValueError("Missing test output tensor for cover generation.")

    # only the first input/output tensor pair is used for the cover
    ipt_descr, ipt = inputs[0]
    out_descr, out = outputs[0]

    ipt_img = to_2d_image(ipt, ipt_descr.axes)
    out_img = to_2d_image(out, out_descr.axes)

    cover_folder = Path(mkdtemp())
    if ipt_img.shape == out_img.shape:
        covers = [cover_folder / "cover.png"]
        imwrite(covers[0], create_diagonal_split_image(ipt_img, out_img))
    else:
        covers = [cover_folder / "input.png", cover_folder / "output.png"]
        imwrite(covers[0], ipt_img)
        imwrite(covers[1], out_img)

    return covers

validate_tensors ¤

validate_tensors(
    tensors: Mapping[
        TensorId, Tuple[TensorDescr, Optional[NDArray[Any]]]
    ],
    tensor_origin: Literal["test_tensor"],
)
Source code in src/bioimageio/spec/model/v0_5.py
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
def validate_tensors(
    tensors: Mapping[TensorId, Tuple[TensorDescr, Optional[NDArray[Any]]]],
    tensor_origin: Literal[
        "test_tensor"
    ],  # for more precise error messages, e.g. 'test_tensor'
):
    """Validate tensor descriptions against their (optional) sample arrays.

    For every tensor with an array the following is checked:
    - the array dtype is compatible with the described dtype (a described
      float32/float64 tensor also accepts any lossless integer dtype),
    - the values are not all negligibly small (all in (-1e-4, 1e-4)), which
      would make reproducibility testing unreliable,
    - every axis size matches its description: fixed int, `ParameterizedSize`,
      `DataDependentSize`, or a `SizeReference` into another tensor.

    Args:
        tensors: maps tensor id to its description and an optional array;
            `None` arrays skip the array-based checks for that tensor.
        tensor_origin: label used in error messages, e.g. 'test_tensor'.

    Raises:
        ValueError: if any array does not match its tensor description.
    """
    # tensor id -> axis id -> (axis description, actual size or None)
    all_tensor_axes: Dict[TensorId, Dict[AxisId, Tuple[AnyAxis, Optional[int]]]] = {}

    def e_msg(d: TensorDescr) -> str:
        """error message prefix locating the offending tensor"""
        return f"{'inputs' if isinstance(d, InputTensorDescr) else 'outputs'}[{d.id}]"

    # first pass: collect every tensor's actual axis sizes so that
    # `SizeReference`s can be resolved across tensors in the second pass
    for descr, array in tensors.values():
        if array is None:
            axis_sizes = {a.id: None for a in descr.axes}
        else:
            try:
                axis_sizes = descr.get_axis_sizes_for_array(array)
            except ValueError as e:
                # re-raise with tensor context, keeping the original cause
                raise ValueError(f"{e_msg(descr)} {e}") from e

        all_tensor_axes[descr.id] = {a.id: (a, axis_sizes[a.id]) for a in descr.axes}

    # second pass: validate each array against its description
    for descr, array in tensors.values():
        if array is None:
            continue

        if descr.dtype in ("float32", "float64"):
            # a described float tensor may be stored with any integer dtype
            # (or either float type) without losing information
            invalid_test_tensor_dtype = array.dtype.name not in (
                "float32",
                "float64",
                "uint8",
                "int8",
                "uint16",
                "int16",
                "uint32",
                "int32",
                "uint64",
                "int64",
            )
        else:
            invalid_test_tensor_dtype = array.dtype.name != descr.dtype

        if invalid_test_tensor_dtype:
            raise ValueError(
                f"{e_msg(descr)}.{tensor_origin}.dtype '{array.dtype.name}' does not"
                + f" match described dtype '{descr.dtype}'"
            )

        # reject all-near-zero tensors: they make result comparisons meaningless
        # (message now states the 1e-4 thresholds actually checked, not 1e5,
        # and no longer claims to be about outputs only)
        if array.min() > -1e-4 and array.max() < 1e-4:
            raise ValueError(
                f"{e_msg(descr)}.{tensor_origin}: values are too small for"
                + " reliable testing."
                + f" Values <=-1e-4 or >=1e-4 must be present in {tensor_origin}"
            )

        for a in descr.axes:
            actual_size = all_tensor_axes[descr.id][a.id][1]
            if actual_size is None:
                # no actual size available for this axis; nothing to compare
                continue

            if a.size is None:
                # axes without a size constraint (e.g. batch) are not checked
                continue

            if isinstance(a.size, int):
                if actual_size != a.size:
                    raise ValueError(
                        f"{e_msg(descr)}.{tensor_origin}: axis '{a.id}' "
                        + f"has incompatible size {actual_size}, expected {a.size}"
                    )
            elif isinstance(a.size, (ParameterizedSize, DataDependentSize)):
                # both size types validate a concrete size themselves
                _ = a.size.validate_size(actual_size)
            elif isinstance(a.size, SizeReference):
                # resolve the referenced tensor/axis collected in the first pass
                ref_tensor_axes = all_tensor_axes.get(a.size.tensor_id)
                if ref_tensor_axes is None:
                    raise ValueError(
                        f"{e_msg(descr)}.axes[{a.id}].size.tensor_id: Unknown tensor"
                        + f" reference '{a.size.tensor_id}'"
                    )

                ref_axis, ref_size = ref_tensor_axes.get(a.size.axis_id, (None, None))
                if ref_axis is None or ref_size is None:
                    # NOTE(review): ref_size is also None when the referenced
                    # tensor has no array; the 'Unknown ... reference' wording
                    # is then misleading — behavior kept, confirm intent
                    raise ValueError(
                        f"{e_msg(descr)}.axes[{a.id}].size.axis_id: Unknown tensor axis"
                        + f" reference '{a.size.tensor_id}.{a.size.axis_id}'"
                    )

                if a.unit != ref_axis.unit:
                    raise ValueError(
                        f"{e_msg(descr)}.axes[{a.id}].size: `SizeReference` requires"
                        + " axis and reference axis to have the same `unit`, but"
                        + f" {a.unit}!={ref_axis.unit}"
                    )

                # expected = referenced size rescaled to this axis + offset
                if actual_size != (
                    expected_size := (
                        ref_size * ref_axis.scale / a.scale + a.size.offset
                    )
                ):
                    raise ValueError(
                        f"{e_msg(descr)}.{tensor_origin}: axis '{a.id}' of size"
                        + f" {actual_size} invalid for referenced size {ref_size};"
                        + f" expected {expected_size}"
                    )
            else:
                assert_never(a.size)